billiard-3.5.0.3/

billiard-3.5.0.3/MANIFEST.in
include *.py
include *.txt
include *.rst
include Makefile
recursive-include Lib *.py
recursive-include Modules *.c *.h
recursive-include Doc *.rst *.py
recursive-include funtests *.py
recursive-include requirements *.txt
recursive-include billiard *.py
recursive-include t *.py
recursive-exclude docs/_build *
recursive-exclude * __pycache__
recursive-exclude * *.py[co]
recursive-exclude * .*.sw*

billiard-3.5.0.3/billiard/

billiard-3.5.0.3/billiard/connection.py
# -*- coding: utf-8 -*-
#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import errno
import io
import os
import sys
import socket
import select
import struct
import tempfile
import itertools

from . import reduction
from . import util

from . import AuthenticationError, BufferTooShort
from ._ext import _billiard
from .compat import setblocking, send_offset
from .five import monotonic
from .reduction import ForkingPickler

try:
    from .compat import _winapi
except ImportError:
    if sys.platform == 'win32':
        raise
    _winapi = None
else:
    if sys.platform == 'win32':
        WAIT_OBJECT_0 = _winapi.WAIT_OBJECT_0
        try:
            WAIT_ABANDONED_0 = _winapi.WAIT_ABANDONED_0
        except AttributeError:
            WAIT_ABANDONED_0 = 128  # noqa
        WAIT_TIMEOUT = _winapi.WAIT_TIMEOUT
        INFINITE = _winapi.INFINITE

__all__ = ['Client', 'Listener', 'Pipe', 'wait']

is_pypy = hasattr(sys, 'pypy_version_info')

#
#
#

BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.

_mmap_counter = itertools.count()

default_family = 'AF_INET'
families = ['AF_INET']

if hasattr(socket, 'AF_UNIX'):
    default_family = 'AF_UNIX'
    families += ['AF_UNIX']

if sys.platform == 'win32':
    default_family = 'AF_PIPE'
    families += ['AF_PIPE']


def _init_timeout(timeout=CONNECTION_TIMEOUT):
    return monotonic() + timeout


def _check_timeout(t):
    return monotonic() > t

#
#
#


def arbitrary_address(family):
    '''
    Return an arbitrary free address for the given family
    '''
    if family == 'AF_INET':
        return ('localhost', 0)
    elif family == 'AF_UNIX':
        return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
    elif family == 'AF_PIPE':
        return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
                               (os.getpid(), next(_mmap_counter)), dir="")
    else:
        raise ValueError('unrecognized family')


def _validate_family(family):
    '''
    Checks if the family is valid for the current environment.
    '''
    if sys.platform != 'win32' and family == 'AF_PIPE':
        raise ValueError('Family %s is not recognized.' % family)

    if sys.platform == 'win32' and family == 'AF_UNIX':
        # double check
        if not hasattr(socket, family):
            raise ValueError('Family %s is not recognized.'
% family) def address_type(address): ''' Return the types of the address This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' ''' if type(address) == tuple: return 'AF_INET' elif type(address) is str and address.startswith('\\\\'): return 'AF_PIPE' elif type(address) is str: return 'AF_UNIX' else: raise ValueError('address type of %r unrecognized' % address) # # Connection classes # class _SocketContainer(object): def __init__(self, sock): self.sock = sock class _ConnectionBase(object): _handle = None def __init__(self, handle, readable=True, writable=True): if isinstance(handle, _SocketContainer): self._socket = handle.sock # keep ref so not collected handle = handle.sock.fileno() handle = handle.__index__() if handle < 0: raise ValueError("invalid handle") if not readable and not writable: raise ValueError( "at least one of `readable` and `writable` must be True") self._handle = handle self._readable = readable self._writable = writable # XXX should we use util.Finalize instead of a __del__? def __del__(self): if self._handle is not None: self._close() def _check_closed(self): if self._handle is None: raise OSError("handle is closed") def _check_readable(self): if not self._readable: raise OSError("connection is write-only") def _check_writable(self): if not self._writable: raise OSError("connection is read-only") def _bad_message_length(self): if self._writable: self._readable = False else: self.close() raise OSError("bad message length") @property def closed(self): """True if the connection is closed""" return self._handle is None @property def readable(self): """True if the connection is readable""" return self._readable @property def writable(self): """True if the connection is writable""" return self._writable def fileno(self): """File descriptor or handle of the connection""" self._check_closed() return self._handle def close(self): """Close the connection""" if self._handle is not None: try: self._close() finally: self._handle = None def send_bytes(self, buf, offset=0, size=None): """Send the bytes data from a bytes-like object""" self._check_closed() self._check_writable() m = memoryview(buf) # HACK for byte-indexing of non-bytewise buffers (e.g. array.array) if m.itemsize > 1: m = memoryview(bytes(m)) n = len(m) if offset < 0: raise ValueError("offset is negative") if n < offset: raise ValueError("buffer length < offset") if size is None: size = n - offset elif size < 0: raise ValueError("size is negative") elif offset + size > n: raise ValueError("buffer length < offset + size") self._send_bytes(m[offset:offset + size]) def send(self, obj): """Send a (picklable) object""" self._check_closed() self._check_writable() self._send_bytes(ForkingPickler.dumps(obj)) def recv_bytes(self, maxlength=None): """ Receive bytes data as a bytes object. """ self._check_closed() self._check_readable() if maxlength is not None and maxlength < 0: raise ValueError("negative maxlength") buf = self._recv_bytes(maxlength) if buf is None: self._bad_message_length() return buf.getvalue() def recv_bytes_into(self, buf, offset=0): """ Receive bytes data into a writeable bytes-like object. Return the number of bytes read. 
""" self._check_closed() self._check_readable() with memoryview(buf) as m: # Get bytesize of arbitrary buffer itemsize = m.itemsize bytesize = itemsize * len(m) if offset < 0: raise ValueError("negative offset") elif offset > bytesize: raise ValueError("offset too large") result = self._recv_bytes() size = result.tell() if bytesize < offset + size: raise BufferTooShort(result.getvalue()) # Message can fit in dest result.seek(0) result.readinto(m[ offset // itemsize:(offset + size) // itemsize ]) return size def recv(self): """Receive a (picklable) object""" self._check_closed() self._check_readable() buf = self._recv_bytes() return ForkingPickler.loadbuf(buf) def poll(self, timeout=0.0): """Whether there is any input available to be read""" self._check_closed() self._check_readable() return self._poll(timeout) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def send_offset(self, buf, offset): return send_offset(self.fileno(), buf, offset) def setblocking(self, blocking): setblocking(self.fileno(), blocking) if _winapi: class PipeConnection(_ConnectionBase): """ Connection class based on a Windows named pipe. Overlapped I/O is used, so the handles must have been created with FILE_FLAG_OVERLAPPED. """ _got_empty_message = False def _close(self, _CloseHandle=_winapi.CloseHandle): _CloseHandle(self._handle) def _send_bytes(self, buf): ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nwritten, err = ov.GetOverlappedResult(True) assert err == 0 assert nwritten == len(buf) def _recv_bytes(self, maxsize=None): if self._got_empty_message: self._got_empty_message = False return io.BytesIO() else: bsize = 128 if maxsize is None else min(maxsize, 128) try: ov, err = _winapi.ReadFile( self._handle, bsize, overlapped=True, ) try: if err == _winapi.ERROR_IO_PENDING: waitres = _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) assert waitres == WAIT_OBJECT_0 except: ov.cancel() raise finally: nread, err = ov.GetOverlappedResult(True) if err == 0: f = io.BytesIO() f.write(ov.getbuffer()) return f elif err == _winapi.ERROR_MORE_DATA: return self._get_more_data(ov, maxsize) except OSError as e: if e.winerror == _winapi.ERROR_BROKEN_PIPE: raise EOFError else: raise raise RuntimeError( "shouldn't get here; expected KeyboardInterrupt") def _poll(self, timeout): if (self._got_empty_message or _winapi.PeekNamedPipe(self._handle)[0] != 0): return True return bool(wait([self], timeout)) def _get_more_data(self, ov, maxsize): buf = ov.getbuffer() f = io.BytesIO() f.write(buf) left = _winapi.PeekNamedPipe(self._handle)[1] assert left > 0 if maxsize is not None and len(buf) + left > maxsize: self._bad_message_length() ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) rbytes, err = ov.GetOverlappedResult(True) assert err == 0 assert rbytes == left f.write(ov.getbuffer()) return f class Connection(_ConnectionBase): """ Connection class based on an arbitrary file descriptor (Unix only), or a socket handle (Windows). 
""" if _winapi: def _close(self, _close=_billiard.closesocket): _close(self._handle) _write = _billiard.send _read = _billiard.recv else: def _close(self, _close=os.close): _close(self._handle) _write = os.write _read = os.read def _send(self, buf, write=_write): remaining = len(buf) while True: try: n = write(self._handle, buf) except (OSError, IOError, socket.error) as exc: if getattr(exc, 'errno', None) != errno.EINTR: raise else: remaining -= n if remaining == 0: break buf = buf[n:] def _recv(self, size, read=_read): buf = io.BytesIO() handle = self._handle remaining = size while remaining > 0: try: chunk = read(handle, remaining) except (OSError, IOError, socket.error) as exc: if getattr(exc, 'errno', None) != errno.EINTR: raise else: n = len(chunk) if n == 0: if remaining == size: raise EOFError else: raise OSError("got end of file during message") buf.write(chunk) remaining -= n return buf def _send_bytes(self, buf, memoryview=memoryview): n = len(buf) # For wire compatibility with 3.2 and lower header = struct.pack("!i", n) if n > 16384: # The payload is large so Nagle's algorithm won't be triggered # and we'd better avoid the cost of concatenation. self._send(header) self._send(buf) else: # Issue #20540: concatenate before sending, to avoid delays due # to Nagle's algorithm on a TCP socket. # Also note we want to avoid sending a 0-length buffer separately, # to avoid "broken pipe" errors if the other end closed the pipe. if isinstance(buf, memoryview): buf = buf.tobytes() self._send(header + buf) def _recv_bytes(self, maxsize=None): buf = self._recv(4) size, = struct.unpack("!i", buf.getvalue()) if maxsize is not None and size > maxsize: return None return self._recv(size) def _poll(self, timeout): r = wait([self], timeout) return bool(r) # # Public functions # class Listener(object): ''' Returns a listener object. This is a wrapper for a bound socket which is 'listening' for connections, or for a Windows named pipe. ''' def __init__(self, address=None, family=None, backlog=1, authkey=None): family = (family or (address and address_type(address)) or default_family) address = address or arbitrary_address(family) _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: self._listener = SocketListener(address, family, backlog) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') self._authkey = authkey def accept(self): ''' Accept a connection on the bound socket or named pipe of `self`. Returns a `Connection` object. ''' if self._listener is None: raise OSError('listener is closed') c = self._listener.accept() if self._authkey: deliver_challenge(c, self._authkey) answer_challenge(c, self._authkey) return c def close(self): ''' Close the bound socket or named pipe of `self`. 
''' listener = self._listener if listener is not None: self._listener = None listener.close() address = property(lambda self: self._listener._address) last_accepted = property(lambda self: self._listener._last_accepted) def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): self.close() def Client(address, family=None, authkey=None): ''' Returns a connection to the address of a `Listener` ''' family = family or address_type(address) _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: c = SocketClient(address) if authkey is not None and not isinstance(authkey, bytes): raise TypeError('authkey should be a byte string') if authkey is not None: answer_challenge(c, authkey) deliver_challenge(c, authkey) return c def detach(sock): if hasattr(sock, 'detach'): return sock.detach() # older socket lib does not have detach. We'll keep a reference around # so that it does not get garbage collected. return _SocketContainer(sock) if sys.platform != 'win32': def Pipe(duplex=True, rnonblock=False, wnonblock=False): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() s1.setblocking(not rnonblock) s2.setblocking(not wnonblock) c1 = Connection(detach(s1)) c2 = Connection(detach(s2)) else: fd1, fd2 = os.pipe() if rnonblock: setblocking(fd1, 0) if wnonblock: setblocking(fd2, 0) c1 = Connection(fd1, writable=False) c2 = Connection(fd2, readable=False) return c1, c2 else: def Pipe(duplex=True, rnonblock=False, wnonblock=False): ''' Returns pair of connection objects at either end of a pipe ''' assert not rnonblock, 'rnonblock not supported on windows' assert not wnonblock, 'wnonblock not supported on windows' address = arbitrary_address('AF_PIPE') if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = BUFSIZE, BUFSIZE else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, BUFSIZE h1 = _winapi.CreateNamedPipe( address, openmode | _winapi.FILE_FLAG_OVERLAPPED | _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, # default security descriptor: the handle cannot be inherited _winapi.NULL ) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) _winapi.SetNamedPipeHandleState( h2, _winapi.PIPE_READMODE_MESSAGE, None, None ) overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) _, err = overlapped.GetOverlappedResult(True) assert err == 0 c1 = PipeConnection(h1, writable=duplex) c2 = PipeConnection(h2, readable=duplex) return c1, c2 # # Definitions for connections based on sockets # class SocketListener(object): ''' Representation of a socket which is bound to an address and listening ''' def __init__(self, address, family, backlog=1): self._socket = socket.socket(getattr(socket, family)) try: # SO_REUSEADDR has different semantics on Windows (issue #2550). 
if os.name == 'posix': self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.setblocking(True) self._socket.bind(address) self._socket.listen(backlog) self._address = self._socket.getsockname() except OSError: self._socket.close() raise self._family = family self._last_accepted = None if family == 'AF_UNIX': self._unlink = util.Finalize( self, os.unlink, args=(address,), exitpriority=0 ) else: self._unlink = None def accept(self): while True: try: s, self._last_accepted = self._socket.accept() except (OSError, IOError, socket.error) as exc: if getattr(exc, 'errno', None) != errno.EINTR: raise else: break s.setblocking(True) return Connection(detach(s)) def close(self): try: self._socket.close() finally: unlink = self._unlink if unlink is not None: self._unlink = None unlink() def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) s = socket.socket(getattr(socket, family)) s.setblocking(True) s.connect(address) return Connection(detach(s)) # # Definitions for connections based on named pipes # if sys.platform == 'win32': class PipeListener(object): ''' Representation of a named pipe ''' def __init__(self, address, backlog=None): self._address = address self._handle_queue = [self._new_handle(first=True)] self._last_accepted = None util.sub_debug('listener created with address=%r', self._address) self.close = util.Finalize( self, PipeListener._finalize_pipe_listener, args=(self._handle_queue, self._address), exitpriority=0 ) def _new_handle(self, first=False): flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE return _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL ) def accept(self): self._handle_queue.append(self._new_handle()) handle = self._handle_queue.pop(0) try: ov = _winapi.ConnectNamedPipe(handle, overlapped=True) except OSError as e: if e.winerror != _winapi.ERROR_NO_DATA: raise # ERROR_NO_DATA can occur if a client has already connected, # written data and then disconnected -- see Issue 14725. 
else: try: _winapi.WaitForMultipleObjects( [ov.event], False, INFINITE) except: ov.cancel() _winapi.CloseHandle(handle) raise finally: _, err = ov.GetOverlappedResult(True) assert err == 0 return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): util.sub_debug('closing listener with address=%r', address) for handle in queue: _winapi.CloseHandle(handle) def PipeClient(address, _ignore=(_winapi.ERROR_SEM_TIMEOUT, _winapi.ERROR_PIPE_BUSY)): ''' Return a connection object connected to the pipe given by `address` ''' t = _init_timeout() while 1: try: _winapi.WaitNamedPipe(address, 1000) h = _winapi.CreateFile( address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, 0, _winapi.NULL, _winapi.OPEN_EXISTING, _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL ) except OSError as e: if e.winerror not in _ignore or _check_timeout(t): raise else: break else: raise _winapi.SetNamedPipeHandleState( h, _winapi.PIPE_READMODE_MESSAGE, None, None ) return PipeConnection(h) # # Authentication stuff # MESSAGE_LENGTH = 20 CHALLENGE = b'#CHALLENGE#' WELCOME = b'#WELCOME#' FAILURE = b'#FAILURE#' def deliver_challenge(connection, authkey): import hmac assert isinstance(authkey, bytes) message = os.urandom(MESSAGE_LENGTH) connection.send_bytes(CHALLENGE + message) digest = hmac.new(authkey, message, 'md5').digest() response = connection.recv_bytes(256) # reject large message if response == digest: connection.send_bytes(WELCOME) else: connection.send_bytes(FAILURE) raise AuthenticationError('digest received was wrong') def answer_challenge(connection, authkey): import hmac assert isinstance(authkey, bytes) message = connection.recv_bytes(256) # reject large message assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message message = message[len(CHALLENGE):] digest = hmac.new(authkey, message, 'md5').digest() connection.send_bytes(digest) response = connection.recv_bytes(256) # reject large message if response != WELCOME: raise AuthenticationError('digest sent was rejected') # # Support for using xmlrpclib for serialization # class ConnectionWrapper(object): def __init__(self, conn, dumps, loads): self._conn = conn self._dumps = dumps self._loads = loads for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): obj = getattr(conn, attr) setattr(self, attr, obj) def send(self, obj): s = self._dumps(obj) self._conn.send_bytes(s) def recv(self): s = self._conn.recv_bytes() return self._loads(s) def _xml_dumps(obj): o = xmlrpclib.dumps((obj, ), None, None, None, 1) # noqa return o.encode('utf-8') def _xml_loads(s): (obj,), method = xmlrpclib.loads(s.decode('utf-8')) # noqa return obj class XmlListener(Listener): def accept(self): global xmlrpclib import xmlrpc.client as xmlrpclib # noqa obj = Listener.accept(self) return ConnectionWrapper(obj, _xml_dumps, _xml_loads) def XmlClient(*args, **kwds): global xmlrpclib import xmlrpc.client as xmlrpclib # noqa return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) # # Wait # if sys.platform == 'win32': def _exhaustive_wait(handles, timeout): # Return ALL handles which are currently signaled. (Only # returning the first signaled might create starvation issues.) 
L = list(handles) ready = [] while L: res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): res -= WAIT_OBJECT_0 elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): res -= WAIT_ABANDONED_0 else: raise RuntimeError('Should not get here') ready.append(L[res]) L = L[res + 1:] timeout = 0 return ready _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} def wait(object_list, timeout=None): ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is None: timeout = INFINITE elif timeout < 0: timeout = 0 else: timeout = int(timeout * 1000 + 0.5) object_list = list(object_list) waithandle_to_obj = {} ov_list = [] ready_objects = set() ready_handles = set() try: for o in object_list: try: fileno = getattr(o, 'fileno') except AttributeError: waithandle_to_obj[o.__index__()] = o else: # start an overlapped read of length zero try: ov, err = _winapi.ReadFile(fileno(), 0, True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err == _winapi.ERROR_IO_PENDING: ov_list.append(ov) waithandle_to_obj[ov.event] = o else: # If o.fileno() is an overlapped pipe handle and # err == 0 then there is a zero length message # in the pipe, but it HAS NOT been consumed. ready_objects.add(o) timeout = 0 ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) finally: # request that overlapped reads stop for ov in ov_list: ov.cancel() # wait for all overlapped reads to stop for ov in ov_list: try: _, err = ov.GetOverlappedResult(True) except OSError as e: err = e.winerror if err not in _ready_errors: raise if err != _winapi.ERROR_OPERATION_ABORTED: o = waithandle_to_obj[ov.event] ready_objects.add(o) if err == 0: # If o.fileno() is an overlapped pipe handle then # a zero length message HAS been consumed. if hasattr(o, '_got_empty_message'): o._got_empty_message = True ready_objects.update(waithandle_to_obj[h] for h in ready_handles) return [p for p in object_list if p in ready_objects] else: if hasattr(select, 'poll'): def _poll(fds, timeout): if timeout is not None: timeout = int(timeout * 1000) # timeout is in milliseconds fd_map = {} pollster = select.poll() for fd in fds: pollster.register(fd, select.POLLIN) if hasattr(fd, 'fileno'): fd_map[fd.fileno()] = fd else: fd_map[fd] = fd ls = [] for fd, event in pollster.poll(timeout): if event & select.POLLNVAL: raise ValueError('invalid file descriptor %i' % fd) ls.append(fd_map[fd]) return ls else: def _poll(fds, timeout): # noqa return select.select(fds, [], [], timeout)[0] def wait(object_list, timeout=None): # noqa ''' Wait till an object in object_list is ready/readable. Returns list of those objects in object_list which are ready/readable. ''' if timeout is not None: if timeout <= 0: return _poll(object_list, 0) else: deadline = monotonic() + timeout while True: try: return _poll(object_list, timeout) except (OSError, IOError, socket.error) as e: if e.errno != errno.EINTR: raise if timeout is not None: timeout = deadline - monotonic() # # Make connection and socket objects sharable if possible # if sys.platform == 'win32': def reduce_connection(conn): handle = conn.fileno() with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: from . 
import resource_sharer ds = resource_sharer.DupSocket(s) return rebuild_connection, (ds, conn.readable, conn.writable) def rebuild_connection(ds, readable, writable): sock = ds.detach() return Connection(detach(sock), readable, writable) reduction.register(Connection, reduce_connection) def reduce_pipe_connection(conn): access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) dh = reduction.DupHandle(conn.fileno(), access) return rebuild_pipe_connection, (dh, conn.readable, conn.writable) def rebuild_pipe_connection(dh, readable, writable): return PipeConnection(detach(dh), readable, writable) reduction.register(PipeConnection, reduce_pipe_connection) else: def reduce_connection(conn): df = reduction.DupFd(conn.fileno()) return rebuild_connection, (df, conn.readable, conn.writable) def rebuild_connection(df, readable, writable): return Connection(detach(df), readable, writable) reduction.register(Connection, reduce_connection) billiard-3.5.0.3/billiard/forkserver.py0000644000175000017500000002023213132743245017717 0ustar omeromer00000000000000from __future__ import absolute_import, print_function import errno import os import selectors import signal import socket import struct import sys import threading from . import connection from . import process from . import reduction from . import semaphore_tracker from . import spawn from . import util from .compat import spawnv_passfds __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process', 'set_forkserver_preload'] # # # MAXFDS_TO_SEND = 256 UNSIGNED_STRUCT = struct.Struct('Q') # large enough for pid_t # # Forkserver class # class ForkServer(object): def __init__(self): self._forkserver_address = None self._forkserver_alive_fd = None self._inherited_fds = None self._lock = threading.Lock() self._preload_modules = ['__main__'] def set_forkserver_preload(self, modules_names): '''Set list of module names to try to load in forkserver process.''' if not all(type(mod) is str for mod in self._preload_modules): raise TypeError('module_names must be a list of strings') self._preload_modules = modules_names def get_inherited_fds(self): '''Return list of fds inherited from parent process. This returns None if the current process was not started by fork server. ''' return self._inherited_fds def connect_to_new_process(self, fds): '''Request forkserver to create a child process. Returns a pair of fds (status_r, data_w). The calling process can read the child process's pid and (eventually) its returncode from status_r. The calling process should write to data_w the pickled preparation and process data. ''' self.ensure_running() if len(fds) + 4 >= MAXFDS_TO_SEND: raise ValueError('too many fds') with socket.socket(socket.AF_UNIX) as client: client.connect(self._forkserver_address) parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() allfds = [child_r, child_w, self._forkserver_alive_fd, semaphore_tracker.getfd()] allfds += fds try: reduction.sendfds(client, allfds) return parent_r, parent_w except: os.close(parent_r) os.close(parent_w) raise finally: os.close(child_r) os.close(child_w) def ensure_running(self): '''Make sure that a fork server is running. This can be called from any process. Note that usually a child process will just reuse the forkserver started by its parent, so ensure_running() will do nothing. 
''' with self._lock: semaphore_tracker.ensure_running() if self._forkserver_alive_fd is not None: return cmd = ('from billiard.forkserver import main; ' + 'main(%d, %d, %r, **%r)') if self._preload_modules: desired_keys = {'main_path', 'sys_path'} data = spawn.get_preparation_data('ignore') data = { x: y for (x, y) in data.items() if x in desired_keys } else: data = {} with socket.socket(socket.AF_UNIX) as listener: address = connection.arbitrary_address('AF_UNIX') listener.bind(address) os.chmod(address, 0o600) listener.listen() # all client processes own the write end of the "alive" pipe; # when they all terminate the read end becomes ready. alive_r, alive_w = os.pipe() try: fds_to_pass = [listener.fileno(), alive_r] cmd %= (listener.fileno(), alive_r, self._preload_modules, data) exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd] spawnv_passfds(exe, args, fds_to_pass) except: os.close(alive_w) raise finally: os.close(alive_r) self._forkserver_address = address self._forkserver_alive_fd = alive_w # # # def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): '''Run forkserver.''' if preload: if '__main__' in preload and main_path is not None: process.current_process()._inheriting = True try: spawn.import_main_path(main_path) finally: del process.current_process()._inheriting for modname in preload: try: __import__(modname) except ImportError: pass # close sys.stdin if sys.stdin is not None: try: sys.stdin.close() sys.stdin = open(os.devnull) except (OSError, ValueError): pass # ignoring SIGCHLD means no need to reap zombie processes handler = signal.signal(signal.SIGCHLD, signal.SIG_IGN) with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ selectors.DefaultSelector() as selector: _forkserver._forkserver_address = listener.getsockname() selector.register(listener, selectors.EVENT_READ) selector.register(alive_r, selectors.EVENT_READ) while True: try: while True: rfds = [key.fileobj for (key, events) in selector.select()] if rfds: break if alive_r in rfds: # EOF because no more client processes left assert os.read(alive_r, 1) == b'' raise SystemExit assert listener in rfds with listener.accept()[0] as s: code = 1 if os.fork() == 0: try: _serve_one(s, listener, alive_r, handler) except Exception: sys.excepthook(*sys.exc_info()) sys.stderr.flush() finally: os._exit(code) except OSError as e: if e.errno != errno.ECONNABORTED: raise def __unpack_fds(child_r, child_w, alive, stfd, *inherited): return child_r, child_w, alive, stfd, inherited def _serve_one(s, listener, alive_r, handler): # close unnecessary stuff and reset SIGCHLD handler listener.close() os.close(alive_r) signal.signal(signal.SIGCHLD, handler) # receive fds from parent process fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) s.close() assert len(fds) <= MAXFDS_TO_SEND (child_r, child_w, _forkserver._forkserver_alive_fd, stfd, _forkserver._inherited_fds) = __unpack_fds(*fds) semaphore_tracker._semaphore_tracker._fd = stfd # send pid to client processes write_unsigned(child_w, os.getpid()) # reseed random number generator if 'random' in sys.modules: import random random.seed() # run process object received over pipe code = spawn._main(child_r) # write the exit code to the pipe write_unsigned(child_w, code) # # Read and write unsigned numbers # def read_unsigned(fd): data = b'' length = UNSIGNED_STRUCT.size while len(data) < length: s = os.read(fd, length - len(data)) if not s: raise EOFError('unexpected EOF') data += s return 
UNSIGNED_STRUCT.unpack(data)[0]


def write_unsigned(fd, n):
    msg = UNSIGNED_STRUCT.pack(n)
    while msg:
        nbytes = os.write(fd, msg)
        if nbytes == 0:
            raise RuntimeError('should not get here')
        msg = msg[nbytes:]

#
#
#

_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload

billiard-3.5.0.3/billiard/exceptions.py
from __future__ import absolute_import

try:
    from multiprocessing import (
        ProcessError,
        BufferTooShort,
        TimeoutError,
        AuthenticationError,
    )
except ImportError:
    class ProcessError(Exception):            # noqa
        pass

    class BufferTooShort(ProcessError):       # noqa
        pass

    class TimeoutError(ProcessError):         # noqa
        pass

    class AuthenticationError(ProcessError):  # noqa
        pass


class TimeLimitExceeded(Exception):
    """The time limit has been exceeded and the job has been terminated."""

    def __str__(self):
        return "TimeLimitExceeded%s" % (self.args, )


class SoftTimeLimitExceeded(Exception):
    """The soft time limit has been exceeded. This exception is raised
    to give the task a chance to clean up."""

    def __str__(self):
        return "SoftTimeLimitExceeded%s" % (self.args, )


class WorkerLostError(Exception):
    """The worker processing a job has exited prematurely."""


class Terminated(Exception):
    """The worker processing a job has been terminated by user request."""


class RestartFreqExceeded(Exception):
    """Restarts too fast."""


class CoroStop(Exception):
    """Coroutine exit, as opposed to StopIteration which may mean it
    should be restarted."""
    pass
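# A minimal sketch of how ``SoftTimeLimitExceeded`` is meant to be handled,
# based on its docstring above (``do_work`` and ``cleanup`` are hypothetical
# placeholders, not functions defined in billiard):
#
#     def run_job():
#         try:
#             do_work()
#         except SoftTimeLimitExceeded:
#             cleanup()
#             raise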
""" from __future__ import absolute_import # ############# py3k ######################################################### import sys PY3 = sys.version_info[0] == 3 try: reload = reload # noqa except NameError: # pragma: no cover from imp import reload # noqa try: from UserList import UserList # noqa except ImportError: # pragma: no cover from collections import UserList # noqa try: from UserDict import UserDict # noqa except ImportError: # pragma: no cover from collections import UserDict # noqa # ############# time.monotonic ############################################### if sys.version_info < (3, 3): import platform SYSTEM = platform.system() try: import ctypes except ImportError: # pragma: no cover ctypes = None # noqa if SYSTEM == 'Darwin' and ctypes is not None: from ctypes.util import find_library libSystem = ctypes.CDLL(find_library('libSystem.dylib')) CoreServices = ctypes.CDLL(find_library('CoreServices'), use_errno=True) mach_absolute_time = libSystem.mach_absolute_time mach_absolute_time.restype = ctypes.c_uint64 absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds absolute_to_nanoseconds.restype = ctypes.c_uint64 absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] def _monotonic(): return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 elif SYSTEM == 'Linux' and ctypes is not None: # from stackoverflow: # questions/1205722/how-do-i-get-monotonic-time-durations-in-python import ctypes import os CLOCK_MONOTONIC = 1 # see class timespec(ctypes.Structure): _fields_ = [ ('tv_sec', ctypes.c_long), ('tv_nsec', ctypes.c_long), ] librt = ctypes.CDLL('librt.so.1', use_errno=True) clock_gettime = librt.clock_gettime clock_gettime.argtypes = [ ctypes.c_int, ctypes.POINTER(timespec), ] def _monotonic(): # noqa t = timespec() if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: errno_ = ctypes.get_errno() raise OSError(errno_, os.strerror(errno_)) return t.tv_sec + t.tv_nsec * 1e-9 else: from time import time as _monotonic try: from time import monotonic except ImportError: monotonic = _monotonic # noqa if PY3: import builtins from queue import Queue, Empty, Full from itertools import zip_longest from io import StringIO, BytesIO map = map string = str string_t = str long_t = int text_t = str range = range int_types = (int, ) def items(d): return d.items() def keys(d): return d.keys() def values(d): return d.values() def nextfun(it): return it.__next__ exec_ = getattr(builtins, 'exec') def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value class WhateverIO(StringIO): def write(self, data): if isinstance(data, bytes): data = data.encode() StringIO.write(self, data) else: import __builtin__ as builtins # noqa from Queue import Queue, Empty, Full # noqa from itertools import imap as map, izip_longest as zip_longest # noqa from StringIO import StringIO # noqa string = unicode # noqa string_t = basestring # noqa text_t = unicode long_t = long # noqa range = xrange int_types = (int, long) def items(d): # noqa return d.iteritems() def keys(d): # noqa return d.iterkeys() def values(d): # noqa return d.itervalues() def nextfun(it): # noqa return it.next def exec_(code, globs=None, locs=None): """Execute code in a namespace.""" if globs is None: frame = sys._getframe(1) globs = frame.f_globals if locs is None: locs = frame.f_locals del frame elif locs is None: locs = globs exec("""exec code in globs, locs""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") BytesIO = WhateverIO = StringIO # noqa def 
def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
    """Class decorator to set metaclass.

    Works with both Python 2 and Python 3 and it does not add
    an extra class in the lookup order like ``six.with_metaclass`` does
    (that is -- it copies the original class instead of using inheritance).

    """

    def _clone_with_metaclass(Class):
        attrs = dict((key, value) for key, value in items(vars(Class))
                     if key not in skip_attrs)
        return Type(Class.__name__, Class.__bases__, attrs)

    return _clone_with_metaclass

billiard-3.5.0.3/billiard/_ext.py
from __future__ import absolute_import

import sys

supports_exec = True

from .compat import _winapi as win32  # noqa

if sys.platform.startswith("java"):
    _billiard = None
else:
    try:
        import _billiard                          # noqa
    except ImportError:
        import _multiprocessing as _billiard      # noqa
        supports_exec = False


def ensure_multiprocessing():
    if _billiard is None:
        raise NotImplementedError("multiprocessing not supported")


def ensure_SemLock():
    try:
        from _billiard import SemLock             # noqa
    except ImportError:
        try:
            from _multiprocessing import SemLock  # noqa
        except ImportError:
            raise ImportError("""\
This platform lacks a functioning sem_open implementation, therefore,
the required synchronization primitives needed will not function,
see issue 3770.""")

billiard-3.5.0.3/billiard/managers.py
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

#
# Imports
#

import sys
import threading
import array

from traceback import format_exc

from . import connection
from . import context
from . import pool
from . import process
from . import reduction
from . import util
from .
import get_context from .five import Queue, items, monotonic __all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token'] PY3 = sys.version_info[0] == 3 # # Register some things for pickling # if PY3: def reduce_array(a): return array.array, (a.typecode, a.tobytes()) else: def reduce_array(a): # noqa return array.array, (a.typecode, a.tostring()) reduction.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items', 'keys', 'values')] if view_types[0] is not list: # only needed in Py3.0 def rebuild_as_list(obj): return list, (list(obj), ) for view_type in view_types: reduction.register(view_type, rebuild_as_list) # # Type for identifying shared objects # class Token(object): ''' Type to uniquely indentify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return '%s(typeid=%r, address=%r, id=%r)' % \ (self.__class__.__name__, self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind == '#TRACEBACK': assert type(result) is str return RemoteError(result) elif kind == '#UNSERIALIZABLE': assert type(result) is str return RemoteError('Unserializable message: %s\n' % result) else: return ValueError('Unrecognized message type') class RemoteError(Exception): def __str__(self): return ('\n' + '-' * 75 + '\n' + str(self.args[0]) + '-' * 75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if callable(func): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): assert isinstance(authkey, bytes) self.registry = registry self.authkey = process.AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.mutex = threading.RLock() def serve_forever(self): ''' Run the server forever ''' self.stop_event = threading.Event() process.current_process()._manager_server = self try: accepter = threading.Thread(target=self.accepter) accepter.daemon = True accepter.start() try: while not self.stop_event.is_set(): self.stop_event.wait(1) except (KeyboardInterrupt, SystemExit): pass finally: if sys.stdout != sys.__stdout__: util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ 
sys.exit(0) def accepter(self): while True: try: c = self.listener.accept() except OSError: continue t = threading.Thread(target=self.handle_request, args=(c, )) t.daemon = True t.start() def handle_request(self, c): ''' Handle a new connection ''' funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, *args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception as exc: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', exc) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop_event.is_set(): try: methodname = obj = None request = recv() ident, methodname, args, kwds = request obj, exposed, gettypeid = id_to_obj[ident] if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % ( methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception as exc: msg = ('#ERROR', exc) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception: send(('#UNSERIALIZABLE', repr(msg))) except Exception as exc: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... 
exception was %r', exc) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__': fallback_str, '__repr__': fallback_repr, '#GETVALUE': fallback_getvalue, } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' with self.mutex: result = [] keys = list(self.id_to_obj.keys()) keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) def number_of_objects(self, c): ''' Number of shared objects ''' return len(self.id_to_obj) - 1 # don't count ident='0' def shutdown(self, c): ''' Shutdown this process ''' try: util.debug('Manager received shutdown message') c.send(('#RETURN', None)) except: import traceback traceback.print_exc() finally: self.stop_event.set() def create(self, c, typeid, *args, **kwds): ''' Create a new shared object and return its id ''' with self.mutex: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: assert len(args) == 1 and not kwds obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: assert type(method_to_typeid) is dict exposed = list(exposed) + list(method_to_typeid) # convert to string because xmlrpclib # only has 32 bit signed integers ident = '%x' % id(obj) util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 # increment the reference count immediately, to avoid # this object being garbage collected before a Proxy # object for it can be created. The caller of create() # is responsible for doing a decref once the Proxy object # has been created. 
self.incref(c, ident) return ident, tuple(exposed) def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): with self.mutex: self.id_to_refcount[ident] += 1 def decref(self, c, ident): with self.mutex: assert self.id_to_refcount[ident] >= 1 self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_obj[ident], self.id_to_refcount[ident] util.debug('disposing of obj with id %r', ident) # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { 'pickle': (connection.Listener, connection.Client), 'xmlrpclib': (connection.XmlListener, connection.XmlClient), } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle', ctx=None): if authkey is None: authkey = process.current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = process.AuthenticationString(authkey) self._state = State() self._state.value = State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] self._ctx = ctx or get_context() def __reduce__(self): return (type(self).from_address, (self._address, self._authkey, self._serializer)) def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' assert self._state.value == State.INITIAL return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' assert self._state.value == State.INITIAL if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = self._ctx.Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the server's address writer.send(server.address) writer.close() # run the manager 
util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,) + args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' if self._process is not None: self._process.join(timeout) if not self._process.is_alive(): self._process = None def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): if self._state.value == State.INITIAL: self.start() assert self._state.value == State.STARTED return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=1.0) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=0.1) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass address = property(lambda self: self._address) @classmethod def register(cls, typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = ( method_to_typeid or getattr(proxytype, '_method_to_typeid_', None) ) if method_to_typeid: for key, value in items(method_to_typeid): assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, 
token, serializer, manager=None, authkey=None, exposed=None, incref=True): with BaseProxy._mutex: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] if authkey is not None: self._authkey = process.AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = process.current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referrent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception as exc: util.debug('... 
decref failed %s', exc) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception as exc: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s', exc) def __reduce__(self): kwds = {} if context.get_spawning_popen() is not None: kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %#x>' % \ (type(self).__name__, self._token.typeid, id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling proxy objects. If possible the shared object is returned, or otherwise a proxy for it. ''' server = getattr(process.current_process(), '_manager_server', None) if server and server.address == token.address: return server.id_to_obj[token.id][0] else: incref = ( kwds.pop('incref', True) and not getattr(process.current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return an proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec('''def %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = process.current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)' 
% (type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): if sys.version_info[0] == 3: _exposed = ('__next__', 'send', 'throw', 'close') else: _exposed_ = ('__next__', 'next', 'send', 'throw', 'close') def next(self, *args): return self._callmethod('next', args) def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True, timeout=None): args = (blocking, ) if timeout is None else (blocking, timeout) return self._callmethod('acquire', args) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self): return self._callmethod('notify') def notify_all(self): return self._callmethod('notify_all') def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = monotonic() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - monotonic() if waittime <= 0: break self.wait(waittime) result = predicate() return result class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class BarrierProxy(BaseProxy): _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') def wait(self, timeout=None): return self._callmethod('wait', (timeout, )) def abort(self): return self._callmethod('abort') def reset(self): return self._callmethod('reset') @property def parties(self): return self._callmethod('__getattribute__', ('parties', )) @property def n_waiting(self): return self._callmethod('__getattribute__', ('n_waiting', )) @property def broken(self): return self._callmethod('__getattribute__', ('broken', )) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) _ListProxy_Attributes = ( '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', 
'__mul__', '__reversed__', '__rmul__', '__setitem__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__', ) if not PY3: _ListProxy_Attributes += ('__getslice__', '__setslice__', '__delslice__') BaseListProxy = MakeProxyType('BaseListProxy', _ListProxy_Attributes) class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values', )) _ArrayProxy_Attributes = ( '__len__', '__getitem__', '__setitem__', ) if not PY3: _ArrayProxy_Attributes += ('__getslice__', '__setslice__') ArrayProxy = MakeProxyType('ArrayProxy', _ArrayProxy_Attributes) BasePoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', )) BasePoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'starmap_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator', } class PoolProxy(BasePoolProxy): def __enter__(self): return self def __exit__(self, *exc_info): self.terminate() # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `billiard.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', Queue) SyncManager.register('JoinableQueue', Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) if hasattr(threading, 'Barrier'): # PY3 SyncManager.register('Barrier', threading.Barrier, BarrierProxy) SyncManager.register('Pool', pool.Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False) billiard-3.5.0.3/billiard/process.py0000644000175000017500000002555713132743245017224 0ustar omeromer00000000000000# # Module providing the `Process` class which emulates `threading.Thread` # # multiprocessing/process.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. 
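#
# Illustrative usage sketch (added for documentation; not part of the original
# module).  The `Process` class provided here mirrors `threading.Thread`:
# construct it with a target, then call start() and join().  `_example_child`
# is a hypothetical example function:
#
#     from billiard import Process
#
#     def _example_child(name):
#         print('hello from', name)
#
#     if __name__ == '__main__':
#         p = Process(target=_example_child, args=('child',))
#         p.start()
#         p.join()
#         print('child exit code:', p.exitcode)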
# from __future__ import absolute_import # # Imports # import os import sys import signal import itertools import logging import threading from _weakrefset import WeakSet from multiprocessing import process as _mproc from .five import items, string_t try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) except OSError: ORIGINAL_DIR = None __all__ = ['BaseProcess', 'Process', 'current_process', 'active_children'] # # Public functions # def current_process(): ''' Return process object representing the current process ''' return _current_process def _set_current_process(process): global _current_process _current_process = _mproc._current_process = process def _cleanup(): # check for processes which have finished for p in list(_children): if p._popen.poll() is not None: _children.discard(p) def _maybe_flush(f): try: f.flush() except (AttributeError, EnvironmentError, NotImplementedError): pass def active_children(_cleanup=_cleanup): ''' Return list of process objects corresponding to live child processes ''' try: _cleanup() except TypeError: # called after gc collect so _cleanup does not exist anymore return [] return list(_children) class BaseProcess(object): ''' Process objects represent activity that is run in a separate process The class is analagous to `threading.Thread` ''' def _Popen(self): raise NotImplementedError() def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, daemon=None, **_kw): assert group is None, 'group argument must be None for now' count = next(_process_counter) self._identity = _current_process._identity + (count, ) self._config = _current_process._config.copy() self._parent_pid = os.getpid() self._popen = None self._target = target self._args = tuple(args) self._kwargs = dict(kwargs) self._name = ( name or type(self).__name__ + '-' + ':'.join(str(i) for i in self._identity) ) if daemon is not None: self.daemon = daemon if _dangling is not None: _dangling.add(self) self._controlled_termination = False def run(self): ''' Method to be run in sub-process; can be overridden in sub-class ''' if self._target: self._target(*self._args, **self._kwargs) def start(self): ''' Start child process ''' assert self._popen is None, 'cannot start a process twice' assert self._parent_pid == os.getpid(), \ 'can only start a process object created by current process' _cleanup() self._popen = self._Popen(self) self._sentinel = self._popen.sentinel _children.add(self) def close(self): if self._popen is not None: self._popen.close() def terminate(self): ''' Terminate process; sends SIGTERM signal or uses TerminateProcess() ''' self._popen.terminate() def terminate_controlled(self): self._controlled_termination = True self.terminate() def join(self, timeout=None): ''' Wait until child process terminates ''' assert self._parent_pid == os.getpid(), 'can only join a child process' assert self._popen is not None, 'can only join a started process' res = self._popen.wait(timeout) if res is not None: _children.discard(self) self.close() def is_alive(self): ''' Return whether process is alive ''' if self is _current_process: return True assert self._parent_pid == os.getpid(), 'can only test a child process' if self._popen is None: return False self._popen.poll() return self._popen.returncode is None def _is_alive(self): if self._popen is None: return False return self._popen.poll() is None @property def name(self): return self._name @name.setter def name(self, name): # noqa assert isinstance(name, string_t), 'name must be a string' self._name = name @property def daemon(self): ''' 
Return whether process is a daemon ''' return self._config.get('daemon', False) @daemon.setter # noqa def daemon(self, daemonic): ''' Set whether process is a daemon ''' assert self._popen is None, 'process has already started' self._config['daemon'] = daemonic @property def authkey(self): return self._config['authkey'] @authkey.setter # noqa def authkey(self, authkey): ''' Set authorization key of process ''' self._config['authkey'] = AuthenticationString(authkey) @property def exitcode(self): ''' Return exit code of process or `None` if it has yet to stop ''' if self._popen is None: return self._popen return self._popen.poll() @property def ident(self): ''' Return identifier (PID) of process or `None` if it has yet to start ''' if self is _current_process: return os.getpid() else: return self._popen and self._popen.pid pid = ident @property def sentinel(self): ''' Return a file descriptor (Unix) or handle (Windows) suitable for waiting for process termination. ''' try: return self._sentinel except AttributeError: raise ValueError("process not started") @property def _counter(self): # compat for 2.7 return _process_counter @property def _children(self): # compat for 2.7 return _children @property def _authkey(self): # compat for 2.7 return self.authkey @property def _daemonic(self): # compat for 2.7 return self.daemon @property def _tempdir(self): # compat for 2.7 return self._config.get('tempdir') def __repr__(self): if self is _current_process: status = 'started' elif self._parent_pid != os.getpid(): status = 'unknown' elif self._popen is None: status = 'initial' else: if self._popen.poll() is not None: status = self.exitcode else: status = 'started' if type(status) is int: if status == 0: status = 'stopped' else: status = 'stopped[%s]' % _exitcode_to_name.get(status, status) return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self.daemon and ' daemon' or '') ## def _bootstrap(self): from . import util, context global _current_process, _process_counter, _children try: if self._start_method is not None: context._force_start_method(self._start_method) _process_counter = itertools.count(1) _children = set() if sys.stdin is not None: try: sys.stdin.close() sys.stdin = open(os.devnull) except (OSError, ValueError): pass old_process = _current_process _set_current_process(self) # Re-init logging system. # Workaround for http://bugs.python.org/issue6721/#msg140215 # Python logging module uses RLock() objects which are broken # after fork. This can result in a deadlock (Celery Issue #496). 
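            # The loop below asks every existing log handler to recreate its
            # I/O lock and then replaces logging's module-level lock, so the
            # child cannot block on a lock that one of the parent's (now
            # nonexistent) threads held at fork time.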
loggerDict = logging.Logger.manager.loggerDict logger_names = list(loggerDict.keys()) logger_names.append(None) # for root logger for name in logger_names: if not name or not isinstance(loggerDict[name], logging.PlaceHolder): for handler in logging.getLogger(name).handlers: handler.createLock() logging._lock = threading.RLock() try: util._finalizer_registry.clear() util._run_after_forkers() finally: # delay finalization of the old process object until after # _run_after_forkers() is executed del old_process util.info('child process %s calling self.run()', self.pid) try: self.run() exitcode = 0 finally: util._exit_function() except SystemExit as exc: if not exc.args: exitcode = 1 elif isinstance(exc.args[0], int): exitcode = exc.args[0] else: sys.stderr.write(str(exc.args[0]) + '\n') _maybe_flush(sys.stderr) exitcode = 0 if isinstance(exc.args[0], str) else 1 except: exitcode = 1 if not util.error('Process %s', self.name, exc_info=True): import traceback sys.stderr.write('Process %s:\n' % self.name) traceback.print_exc() finally: util.info('process %s exiting with exitcode %d', self.pid, exitcode) _maybe_flush(sys.stdout) _maybe_flush(sys.stderr) return exitcode # # We subclass bytes to avoid accidental transmission of auth keys over network # class AuthenticationString(bytes): def __reduce__(self): from .context import get_spawning_popen if get_spawning_popen() is None: raise TypeError( 'Pickling an AuthenticationString object is ' 'disallowed for security reasons') return AuthenticationString, (bytes(self),) # # Create object representing the main process # class _MainProcess(BaseProcess): def __init__(self): self._identity = () self._name = 'MainProcess' self._parent_pid = None self._popen = None self._config = {'authkey': AuthenticationString(os.urandom(32)), 'semprefix': '/mp'} _current_process = _MainProcess() _process_counter = itertools.count(1) _children = set() del _MainProcess Process = BaseProcess # # Give names to some return codes # _exitcode_to_name = {} for name, signum in items(signal.__dict__): if name[:3] == 'SIG' and '_' not in name: _exitcode_to_name[-signum] = name # For debug and leak testing _dangling = WeakSet() billiard-3.5.0.3/billiard/popen_spawn_win32.py0000644000175000017500000000656013132743245021112 0ustar omeromer00000000000000from __future__ import absolute_import import io import os import msvcrt import signal import sys from . import context from . import spawn from . import reduction from .compat import _winapi __all__ = ['Popen'] # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # if sys.platform == 'win32': try: from _winapi import CreateProcess except ImportError: # Py2.7 from _subprocess import CreateProcess class Popen(object): ''' Start a subprocess to run the code of a process object ''' method = 'spawn' sentinel = None def __init__(self, process_obj): os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1" spawn._Django_old_layout_hack__save() prep_data = spawn.get_preparation_data(process_obj._name) # read end of pipe will be "stolen" by the child process # -- see spawn_main() in spawn.py. 
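        # The raw handle value and the parent's pid are passed to the child on
        # its command line (parent_pid=/pipe_handle= below); the parent keeps
        # only the write end, wrapped in a file object, for sending the
        # pickled preparation data and process object.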
rhandle, whandle = _winapi.CreatePipe(None, 0) wfd = msvcrt.open_osfhandle(whandle, 0) cmd = spawn.get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle) cmd = ' '.join('"%s"' % x for x in cmd) with io.open(wfd, 'wb', closefd=True) as to_child: # start process try: hp, ht, pid, tid = CreateProcess( spawn.get_executable(), cmd, None, None, False, 0, None, None, None) _winapi.CloseHandle(ht) except: _winapi.CloseHandle(rhandle) raise # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) # send information to child context.set_spawning_popen(self) try: reduction.dump(prep_data, to_child) reduction.dump(process_obj, to_child) finally: context.set_spawning_popen(None) def close(self): if self.sentinel is not None: try: _winapi.CloseHandle(self.sentinel) finally: self.sentinel = None def duplicate_for_child(self, handle): assert self is context.get_spawning_popen() return reduction.duplicate(handle, self.sentinel) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _winapi.TerminateProcess(int(self._handle), TERMINATE) except OSError: if self.wait(timeout=1.0) is None: raise billiard-3.5.0.3/billiard/util.py0000644000175000017500000001006313132743245016505 0ustar omeromer00000000000000# # Module providing various facilities to other parts of the package # # billiard/util.py # # Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt # Licensed to PSF under a Contributor Agreement. 
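#
# Illustrative sketch (added for documentation; not part of the original
# module): enabling billiard's logger with the helpers defined below,
# including the extra-verbose SUBDEBUG level:
#
#     from billiard import util
#
#     logger = util.log_to_stderr(util.SUBDEBUG)
#     logger.log(util.SUBDEBUG, 'very verbose multiprocessing message')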
# from __future__ import absolute_import import sys import errno import functools import atexit try: from subprocess import _args_from_interpreter_flags # noqa except ImportError: # pragma: no cover def _args_from_interpreter_flags(): # noqa """Return a list of command-line arguments reproducing the current settings in sys.flags and sys.warnoptions.""" flag_opt_map = { 'debug': 'd', 'optimize': 'O', 'dont_write_bytecode': 'B', 'no_user_site': 's', 'no_site': 'S', 'ignore_environment': 'E', 'verbose': 'v', 'bytes_warning': 'b', 'hash_randomization': 'R', 'py3k_warning': '3', } args = [] for flag, opt in flag_opt_map.items(): v = getattr(sys.flags, flag) if v > 0: args.append('-' + opt * v) for opt in sys.warnoptions: args.append('-W' + opt) return args from multiprocessing.util import ( # noqa _afterfork_registry, _afterfork_counter, _exit_function, _finalizer_registry, _finalizer_counter, Finalize, ForkAwareLocal, ForkAwareThreadLock, get_temp_dir, is_exiting, register_after_fork, _run_after_forkers, _run_finalizers, ) from .compat import get_errno __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 'SUBDEBUG', 'SUBWARNING', ] # # Logging # NOTSET = 0 SUBDEBUG = 5 DEBUG = 10 INFO = 20 SUBWARNING = 25 ERROR = 40 LOGGER_NAME = 'multiprocessing' DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False def sub_debug(msg, *args, **kwargs): if _logger: _logger.log(SUBDEBUG, msg, *args, **kwargs) def debug(msg, *args, **kwargs): if _logger: _logger.log(DEBUG, msg, *args, **kwargs) def info(msg, *args, **kwargs): if _logger: _logger.log(INFO, msg, *args, **kwargs) def sub_warning(msg, *args, **kwargs): if _logger: _logger.log(SUBWARNING, msg, *args, **kwargs) def error(msg, *args, **kwargs): if _logger: _logger.log(ERROR, msg, *args, **kwargs) def get_logger(): ''' Returns logger used by multiprocessing ''' global _logger import logging logging._acquireLock() try: if not _logger: _logger = logging.getLogger(LOGGER_NAME) _logger.propagate = 0 logging.addLevelName(SUBDEBUG, 'SUBDEBUG') logging.addLevelName(SUBWARNING, 'SUBWARNING') # XXX multiprocessing should cleanup before logging if hasattr(atexit, 'unregister'): atexit.unregister(_exit_function) atexit.register(_exit_function) else: atexit._exithandlers.remove((_exit_function, (), {})) atexit._exithandlers.append((_exit_function, (), {})) finally: logging._releaseLock() return _logger def log_to_stderr(level=None): ''' Turn on logging and add a handler which prints to stderr ''' global _log_to_stderr import logging logger = get_logger() formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level: logger.setLevel(level) _log_to_stderr = True return _logger def _eintr_retry(func): ''' Automatic retry after EINTR. 
''' @functools.wraps(func) def wrapped(*args, **kwargs): while 1: try: return func(*args, **kwargs) except OSError as exc: if get_errno(exc) != errno.EINTR: raise return wrapped billiard-3.5.0.3/billiard/__init__.py0000644000175000017500000000307413132746162017274 0ustar omeromer00000000000000"""Python multiprocessing fork with improvements and bugfixes""" # # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Try calling `multiprocessing.doc.main()` to read the html # documentation in a webbrowser. # # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import sys from . import context VERSION = (3, 5, 0, 3) __version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:]) __author__ = 'R Oudkerk / Python Software Foundation' __author_email__ = 'python-dev@python.org' __maintainer__ = 'Ask Solem' __contact__ = "ask@celeryproject.org" __homepage__ = "http://github.com/celery/billiard" __docformat__ = "restructuredtext" # -eof meta- # # Copy stuff from default context # globals().update((name, getattr(context._default_context, name)) for name in context._default_context.__all__) __all__ = context._default_context.__all__ # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def ensure_multiprocessing(): from ._ext import ensure_multiprocessing return ensure_multiprocessing() billiard-3.5.0.3/billiard/semaphore_tracker.py0000644000175000017500000001143513132743245021232 0ustar omeromer00000000000000# # On Unix we run a server process which keeps track of unlinked # semaphores. The server ignores SIGINT and SIGTERM and reads from a # pipe. Every other process of the program has a copy of the writable # end of the pipe, so we get EOF when all other processes have exited. # Then the server process unlinks any remaining semaphore names. # # This is important because the system only supports a limited number # of named semaphores, and they will not be automatically removed till # the next reboot. Without this semaphore tracker process, "killall # python" would probably leave unlinked semaphores. # from __future__ import absolute_import import io import os import signal import sys import threading import warnings from ._ext import _billiard from . import spawn from . import util from .compat import spawnv_passfds __all__ = ['ensure_running', 'register', 'unregister'] class SemaphoreTracker(object): def __init__(self): self._lock = threading.Lock() self._fd = None def getfd(self): self.ensure_running() return self._fd def ensure_running(self): '''Make sure that semaphore tracker process is running. This can be run from any process. 
Usually a child process will use the semaphore created by its parent.''' with self._lock: if self._fd is not None: return fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass cmd = 'from billiard.semaphore_tracker import main;main(%d)' r, w = os.pipe() try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() args += ['-c', cmd % r] spawnv_passfds(exe, args, fds_to_pass) except: os.close(w) raise else: self._fd = w finally: os.close(r) def register(self, name): '''Register name of semaphore with semaphore tracker.''' self._send('REGISTER', name) def unregister(self, name): '''Unregister name of semaphore with semaphore tracker.''' self._send('UNREGISTER', name) def _send(self, cmd, name): self.ensure_running() msg = '{0}:{1}\n'.format(cmd, name).encode('ascii') if len(name) > 512: # posix guarantees that writes to a pipe of less than PIPE_BUF # bytes are atomic, and that PIPE_BUF >= 512 raise ValueError('name too long') nbytes = os.write(self._fd, msg) assert nbytes == len(msg) _semaphore_tracker = SemaphoreTracker() ensure_running = _semaphore_tracker.ensure_running register = _semaphore_tracker.register unregister = _semaphore_tracker.unregister getfd = _semaphore_tracker.getfd def main(fd): '''Run semaphore tracker.''' # protect the process from ^C and "killall python" etc signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) for f in (sys.stdin, sys.stdout): try: f.close() except Exception: pass cache = set() try: # keep track of registered/unregistered semaphores with io.open(fd, 'rb') as f: for line in f: try: cmd, name = line.strip().split(b':') if cmd == b'REGISTER': cache.add(name) elif cmd == b'UNREGISTER': cache.remove(name) else: raise RuntimeError('unrecognized command %r' % cmd) except Exception: try: sys.excepthook(*sys.exc_info()) except: pass finally: # all processes have terminated; cleanup any remaining semaphores if cache: try: warnings.warn('semaphore_tracker: There appear to be %d ' 'leaked semaphores to clean up at shutdown' % len(cache)) except Exception: pass for name in cache: # For some reason the process which created and registered this # semaphore has failed to unregister it. Presumably it has died. # We therefore unlink it. 
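            # (sem_unlink() only removes the name from the system; the
            # semaphore itself is reclaimed once every process that still has
            # it open has released it, mirroring unlink() on regular files.)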
try: name = name.decode('ascii') try: _billiard.sem_unlink(name) except Exception as e: warnings.warn('semaphore_tracker: %r: %s' % (name, e)) finally: pass billiard-3.5.0.3/billiard/compat.py0000644000175000017500000002103713132743245017016 0ustar omeromer00000000000000from __future__ import absolute_import import errno import numbers import os import sys from .five import range, zip_longest if sys.platform == 'win32': try: import _winapi # noqa except ImportError: # pragma: no cover from _multiprocessing import win32 as _winapi # noqa else: _winapi = None # noqa try: import resource except ImportError: # pragma: no cover resource = None try: from io import UnsupportedOperation FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation) except ImportError: # pragma: no cover # Py2 FILENO_ERRORS = (AttributeError, ValueError) # noqa if sys.version_info > (2, 7, 5): buf_t, is_new_buffer = memoryview, True # noqa else: buf_t, is_new_buffer = buffer, False # noqa if hasattr(os, 'write'): __write__ = os.write if is_new_buffer: def send_offset(fd, buf, offset): return __write__(fd, buf[offset:]) else: # Py<2.7.6 def send_offset(fd, buf, offset): # noqa return __write__(fd, buf_t(buf, offset)) else: # non-posix platform def send_offset(fd, buf, offset): # noqa raise NotImplementedError('send_offset') try: fsencode = os.fsencode fsdecode = os.fsdecode except AttributeError: def _fscodec(): encoding = sys.getfilesystemencoding() if encoding == 'mbcs': errors = 'strict' else: errors = 'surrogateescape' def fsencode(filename): """ Encode filename to the filesystem encoding with 'surrogateescape' error handler, return bytes unchanged. On Windows, use 'strict' error handler if the file system encoding is 'mbcs' (which is the default encoding). """ if isinstance(filename, bytes): return filename elif isinstance(filename, str): return filename.encode(encoding, errors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) def fsdecode(filename): """ Decode filename from the filesystem encoding with 'surrogateescape' error handler, return str unchanged. On Windows, use 'strict' error handler if the file system encoding is 'mbcs' (which is the default encoding). """ if isinstance(filename, str): return filename elif isinstance(filename, bytes): return filename.decode(encoding, errors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) return fsencode, fsdecode fsencode, fsdecode = _fscodec() del _fscodec if sys.version_info[0] == 3: bytes = bytes else: _bytes = bytes # the 'bytes' alias in Python2 does not support an encoding argument. class bytes(_bytes): # noqa def __new__(cls, *args): if len(args) > 1: return _bytes(args[0]).encode(*args[1:]) return _bytes(*args) def maybe_fileno(f): """Get object fileno, or :const:`None` if not defined.""" if isinstance(f, numbers.Integral): return f try: return f.fileno() except FILENO_ERRORS: pass def get_fdmax(default=None): """Return the maximum number of open file descriptors on this system. :keyword default: Value returned if there's no file descriptor limit. 
""" try: return os.sysconf('SC_OPEN_MAX') except: pass if resource is None: # Windows return default fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if fdmax == resource.RLIM_INFINITY: return default return fdmax def uniq(it): """Return all unique elements in ``it``, preserving order.""" seen = set() return (seen.add(obj) or obj for obj in it if obj not in seen) try: closerange = os.closerange except AttributeError: def closerange(fd_low, fd_high): # noqa for fd in reversed(range(fd_low, fd_high)): try: os.close(fd) except OSError as exc: if exc.errno != errno.EBADF: raise def close_open_fds(keep=None): # must make sure this is 0-inclusive (Issue #celery/1882) keep = list(uniq(sorted( f for f in map(maybe_fileno, keep or []) if f is not None ))) maxfd = get_fdmax(default=2048) kL, kH = iter([-1] + keep), iter(keep + [maxfd]) for low, high in zip_longest(kL, kH): if low + 1 != high: closerange(low + 1, high) else: def close_open_fds(keep=None): # noqa keep = [maybe_fileno(f) for f in (keep or []) if maybe_fileno(f) is not None] for fd in reversed(range(get_fdmax(default=2048))): if fd not in keep: try: os.close(fd) except OSError as exc: if exc.errno != errno.EBADF: raise def get_errno(exc): """:exc:`socket.error` and :exc:`IOError` first got the ``.errno`` attribute in Py2.7""" try: return exc.errno except AttributeError: try: # e.args = (errno, reason) if isinstance(exc.args, tuple) and len(exc.args) == 2: return exc.args[0] except AttributeError: pass return 0 try: import _posixsubprocess except ImportError: def spawnv_passfds(path, args, passfds): if sys.platform != 'win32': # when not using _posixsubprocess (on earlier python) and not on # windows, we want to keep stdout/stderr open... passfds = passfds + [ maybe_fileno(sys.stdout), maybe_fileno(sys.stderr), ] pid = os.fork() if not pid: close_open_fds(keep=sorted(f for f in passfds if f)) os.execv(fsencode(path), args) return pid else: def spawnv_passfds(path, args, passfds): passfds = sorted(passfds) errpipe_read, errpipe_write = os.pipe() try: return _posixsubprocess.fork_exec( args, [fsencode(path)], True, passfds, None, None, -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, False, False, None) finally: os.close(errpipe_read) os.close(errpipe_write) if sys.platform == 'win32': def setblocking(handle, blocking): raise NotImplementedError('setblocking not implemented on win32') def isblocking(handle): raise NotImplementedError('isblocking not implemented on win32') else: from os import O_NONBLOCK from fcntl import fcntl, F_GETFL, F_SETFL def isblocking(handle): # noqa return not (fcntl(handle, F_GETFL) & O_NONBLOCK) def setblocking(handle, blocking): # noqa flags = fcntl(handle, F_GETFL, 0) fcntl( handle, F_SETFL, flags & (~O_NONBLOCK) if blocking else flags | O_NONBLOCK, ) E_PSUTIL_MISSING = """ On Windows, the ability to inspect memory usage requires the psutil library. You can install it using pip: $ pip install psutil """ E_RESOURCE_MISSING = """ Your platform ({0}) does not seem to have the `resource.getrusage' function. Please open an issue so that we can add support for this platform. 
""" if sys.platform == 'win32': try: import psutil except ImportError: # pragma: no cover psutil = None # noqa def mem_rss(): # type () -> int if psutil is None: raise ImportError(E_PSUTIL_MISSING.strip()) return int(psutil.Process(os.getpid()).memory_info()[0] / 1024.0) else: try: from resource import getrusage, RUSAGE_SELF except ImportError: # pragma: no cover getrusage = RUSAGE_SELF = None # noqa if 'bsd' in sys.platform or sys.platform == 'darwin': # On BSD platforms :man:`getrusage(2)` ru_maxrss field is in bytes. def maxrss_to_kb(v): # type: (SupportsInt) -> int return int(v) / 1024.0 else: # On Linux it's kilobytes. def maxrss_to_kb(v): # type: (SupportsInt) -> int return int(v) def mem_rss(): # type () -> int if resource is None: raise ImportError(E_RESOURCE_MISSING.strip().format(sys.platform)) return maxrss_to_kb(getrusage(RUSAGE_SELF).ru_maxrss) billiard-3.5.0.3/billiard/common.py0000644000175000017500000001122413132743245017020 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """ This module contains utilities added by billiard, to keep "non-core" functionality out of ``.util``.""" from __future__ import absolute_import import os import signal import sys import pickle as pypickle try: import cPickle as cpickle except ImportError: # pragma: no cover cpickle = None # noqa from .exceptions import RestartFreqExceeded from .five import monotonic if sys.version_info < (2, 6): # pragma: no cover # cPickle does not use absolute_imports pickle = pypickle pickle_load = pypickle.load pickle_loads = pypickle.loads else: pickle = cpickle or pypickle pickle_load = pickle.load pickle_loads = pickle.loads # cPickle.loads does not support buffer() objects, # but we can just create a StringIO and use load. if sys.version_info[0] == 3: from io import BytesIO else: try: from cStringIO import StringIO as BytesIO # noqa except ImportError: from StringIO import StringIO as BytesIO # noqa SIGMAP = dict( (getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG') ) for _alias_sig in ('SIGHUP', 'SIGABRT'): try: # Alias for deprecated signal overwrites the name we want SIGMAP[getattr(signal, _alias_sig)] = _alias_sig except AttributeError: pass TERM_SIGNAL, TERM_SIGNAME = signal.SIGTERM, 'SIGTERM' REMAP_SIGTERM = os.environ.get('REMAP_SIGTERM') if REMAP_SIGTERM: TERM_SIGNAL, TERM_SIGNAME = ( getattr(signal, REMAP_SIGTERM), REMAP_SIGTERM) TERMSIGS_IGNORE = {'SIGTERM'} if REMAP_SIGTERM else set() TERMSIGS_FORCE = {'SIGQUIT'} if REMAP_SIGTERM else set() EX_SOFTWARE = 70 TERMSIGS_DEFAULT = { 'SIGHUP', 'SIGQUIT', TERM_SIGNAME, 'SIGUSR1', 'SIGUSR2' } TERMSIGS_FULL = { 'SIGHUP', 'SIGQUIT', 'SIGTRAP', 'SIGABRT', 'SIGEMT', 'SIGSYS', 'SIGPIPE', 'SIGALRM', TERM_SIGNAME, 'SIGXCPU', 'SIGXFSZ', 'SIGVTALRM', 'SIGPROF', 'SIGUSR1', 'SIGUSR2', } #: set by signal handlers just before calling exit. #: if this is true after the sighandler returns it means that something #: went wrong while terminating the process, and :func:`os._exit` #: must be called ASAP. 
_should_have_exited = [False] def human_status(status): if (status or 0) < 0: try: return 'signal {0} ({1})'.format(-status, SIGMAP[-status]) except KeyError: return 'signal {0}'.format(-status) return 'exitcode {0}'.format(status) def pickle_loads(s, load=pickle_load): # used to support buffer objects return load(BytesIO(s)) def maybe_setsignal(signum, handler): try: signal.signal(signum, handler) except (OSError, AttributeError, ValueError, RuntimeError): pass def _shutdown_cleanup(signum, frame): # we will exit here so if the signal is received a second time # we can be sure that something is very wrong and we may be in # a crashing loop. if _should_have_exited[0]: os._exit(EX_SOFTWARE) maybe_setsignal(signum, signal.SIG_DFL) _should_have_exited[0] = True sys.exit(-(256 - signum)) def signum(sig): return getattr(signal, sig, None) def _should_override_term_signal(sig, current): return ( sig in TERMSIGS_FORCE or (current is not None and current != signal.SIG_IGN) ) def reset_signals(handler=_shutdown_cleanup, full=False): for sig in TERMSIGS_FULL if full else TERMSIGS_DEFAULT: num = signum(sig) if num: if _should_override_term_signal(sig, signal.getsignal(num)): maybe_setsignal(num, handler) for sig in TERMSIGS_IGNORE: num = signum(sig) if num: maybe_setsignal(num, signal.SIG_IGN) class restart_state(object): RestartFreqExceeded = RestartFreqExceeded def __init__(self, maxR, maxT): self.maxR, self.maxT = maxR, maxT self.R, self.T = 0, None def step(self, now=None): now = monotonic() if now is None else now R = self.R if self.T and now - self.T >= self.maxT: # maxT passed, reset counter and time passed. self.T, self.R = now, 0 elif self.maxR and self.R >= self.maxR: # verify that R has a value as the result handler # resets this when a job is accepted. If a job is accepted # the startup probably went fine (startup restart burst # protection) if self.R: # pragma: no cover self.R = 0 # reset in case someone catches the error raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT)) # first run sets T if self.T is None: self.T = now self.R += 1 billiard-3.5.0.3/billiard/popen_fork.py0000644000175000017500000000505013132743245017672 0ustar omeromer00000000000000from __future__ import absolute_import import os import sys import errno from .common import TERM_SIGNAL __all__ = ['Popen'] # # Start child process using fork # class Popen(object): method = 'fork' sentinel = None def __init__(self, process_obj): sys.stdout.flush() sys.stderr.flush() self.returncode = None self._launch(process_obj) def duplicate_for_child(self, fd): return fd def poll(self, flag=os.WNOHANG): if self.returncode is None: while True: try: pid, sts = os.waitpid(self.pid, flag) except OSError as e: if e.errno == errno.EINTR: continue # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None else: break if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: from .connection import wait if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. 
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def terminate(self): if self.returncode is None: try: os.kill(self.pid, TERM_SIGNAL) except OSError as exc: if getattr(exc, 'errno', None) != errno.ESRCH: if self.wait(timeout=0.1) is None: raise def _launch(self, process_obj): code = 1 parent_r, child_w = os.pipe() self.pid = os.fork() if self.pid == 0: try: os.close(parent_r) if 'random' in sys.modules: import random random.seed() code = process_obj._bootstrap() finally: os._exit(code) else: os.close(child_w) self.sentinel = parent_r def close(self): if self.sentinel is not None: try: os.close(self.sentinel) finally: self.sentinel = None billiard-3.5.0.3/billiard/queues.py0000644000175000017500000003025313132743245017042 0ustar omeromer00000000000000# # Module implementing queues # # multiprocessing/queues.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import sys import os import threading import collections import weakref import errno from . import connection from . import context from .compat import get_errno from .five import monotonic, Empty, Full from .util import ( debug, error, info, Finalize, register_after_fork, is_exiting, ) from .reduction import ForkingPickler __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] class Queue(object): ''' Queue type using a pipe, buffer and thread ''' def __init__(self, maxsize=0, *args, **kwargs): try: ctx = kwargs['ctx'] except KeyError: raise TypeError('missing 1 required keyword-only argument: ctx') if maxsize <= 0: # Can raise ImportError (see issues #3770 and #23400) from .synchronize import SEM_VALUE_MAX as maxsize # noqa self._maxsize = maxsize self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._opid = os.getpid() if sys.platform == 'win32': self._wlock = None else: self._wlock = ctx.Lock() self._sem = ctx.BoundedSemaphore(maxsize) # For use by concurrent.futures self._ignore_epipe = False self._after_fork() if sys.platform != 'win32': register_after_fork(self, Queue._after_fork) def __getstate__(self): context.assert_spawning(self) return (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) def __setstate__(self, state): (self._ignore_epipe, self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid) = state self._after_fork() def _after_fork(self): debug('Queue._after_fork()') self._notempty = threading.Condition(threading.Lock()) self._buffer = collections.deque() self._thread = None self._jointhread = None self._joincancelled = False self._closed = False self._close = None self._send_bytes = self._writer.send self._recv = self._reader.recv self._send_bytes = self._writer.send_bytes self._recv_bytes = self._reader.recv_bytes self._poll = self._reader.poll def put(self, obj, block=True, timeout=None): assert not self._closed if not self._sem.acquire(block, timeout): raise Full with self._notempty: if self._thread is None: self._start_thread() self._buffer.append(obj) self._notempty.notify() def get(self, block=True, timeout=None): if block and timeout is None: with self._rlock: res = self._recv_bytes() self._sem.release() else: if block: deadline = monotonic() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: timeout = deadline - monotonic() if timeout < 0 or not self._poll(timeout): raise Empty elif not self._poll(): raise Empty res = self._recv_bytes() 
self._sem.release() finally: self._rlock.release() # unserialize the data after having released the lock return ForkingPickler.loads(res) def qsize(self): # Raises NotImplementedError on macOS because # of broken sem_getvalue() return self._maxsize - self._sem._semlock._get_value() def empty(self): return not self._poll() def full(self): return self._sem._semlock._is_zero() def get_nowait(self): return self.get(False) def put_nowait(self, obj): return self.put(obj, False) def close(self): self._closed = True try: self._reader.close() finally: close = self._close if close: self._close = None close() def join_thread(self): debug('Queue.join_thread()') assert self._closed if self._jointhread: self._jointhread() def cancel_join_thread(self): debug('Queue.cancel_join_thread()') self._joincancelled = True try: self._jointhread.cancel() except AttributeError: pass def _start_thread(self): debug('Queue._start_thread()') # Start thread which transfers data from buffer to pipe self._buffer.clear() self._thread = threading.Thread( target=Queue._feed, args=(self._buffer, self._notempty, self._send_bytes, self._wlock, self._writer.close, self._ignore_epipe), name='QueueFeederThread' ) self._thread.daemon = True debug('doing self._thread.start()') self._thread.start() debug('... done self._thread.start()') # On process exit we will wait for data to be flushed to pipe. # # However, if this process created the queue then all # processes which use the queue will be descendants of this # process. Therefore waiting for the queue to be flushed # is pointless once all the child processes have been joined. created_by_this_process = (self._opid == os.getpid()) if not self._joincancelled and not created_by_this_process: self._jointhread = Finalize( self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5 ) # Send sentinel to the thread queue object when garbage collected self._close = Finalize( self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10 ) @staticmethod def _finalize_join(twr): debug('joining queue thread') thread = twr() if thread is not None: thread.join() debug('... queue thread joined') else: debug('... queue thread already dead') @staticmethod def _finalize_close(buffer, notempty): debug('telling queue thread to quit') with notempty: buffer.append(_sentinel) notempty.notify() @staticmethod def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe): debug('starting thread to feed data to pipe') nacquire = notempty.acquire nrelease = notempty.release nwait = notempty.wait bpopleft = buffer.popleft sentinel = _sentinel if sys.platform != 'win32': wacquire = writelock.acquire wrelease = writelock.release else: wacquire = None try: while 1: nacquire() try: if not buffer: nwait() finally: nrelease() try: while 1: obj = bpopleft() if obj is sentinel: debug('feeder thread got sentinel -- exiting') close() return # serialize the data before acquiring the lock obj = ForkingPickler.dumps(obj) if wacquire is None: send_bytes(obj) else: wacquire() try: send_bytes(obj) finally: wrelease() except IndexError: pass except Exception as exc: if ignore_epipe and get_errno(exc) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses # may be become unusable while the process is cleaning up. # We ignore errors which happen after the process has # started to cleanup. 
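            # is_exiting() below distinguishes interpreter shutdown (logged
            # quietly at info level) from a genuine failure, where the error
            # is logged and a traceback is printed to stderr as a fallback.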
try: if is_exiting(): info('error in queue thread: %r', exc, exc_info=True) else: if not error('error in queue thread: %r', exc, exc_info=True): import traceback traceback.print_exc() except Exception: pass _sentinel = object() class JoinableQueue(Queue): ''' A queue type which also supports join() and task_done() methods Note that if you do not call task_done() for each finished task then eventually the counter's semaphore may overflow causing Bad Things to happen. ''' def __init__(self, maxsize=0, *args, **kwargs): try: ctx = kwargs['ctx'] except KeyError: raise TypeError('missing 1 required keyword argument: ctx') Queue.__init__(self, maxsize, ctx=ctx) self._unfinished_tasks = ctx.Semaphore(0) self._cond = ctx.Condition() def __getstate__(self): return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) def __setstate__(self, state): Queue.__setstate__(self, state[:-2]) self._cond, self._unfinished_tasks = state[-2:] def put(self, obj, block=True, timeout=None): assert not self._closed if not self._sem.acquire(block, timeout): raise Full with self._notempty: with self._cond: if self._thread is None: self._start_thread() self._buffer.append(obj) self._unfinished_tasks.release() self._notempty.notify() def task_done(self): with self._cond: if not self._unfinished_tasks.acquire(False): raise ValueError('task_done() called too many times') if self._unfinished_tasks._semlock._is_zero(): self._cond.notify_all() def join(self): with self._cond: if not self._unfinished_tasks._semlock._is_zero(): self._cond.wait() class _SimpleQueue(object): ''' Simplified Queue type -- really just a locked pipe ''' def __init__(self, rnonblock=False, wnonblock=False, ctx=None): self._reader, self._writer = connection.Pipe( duplex=False, rnonblock=rnonblock, wnonblock=wnonblock, ) self._poll = self._reader.poll self._rlock = self._wlock = None def empty(self): return not self._poll() def __getstate__(self): context.assert_spawning(self) return (self._reader, self._writer, self._rlock, self._wlock) def __setstate__(self, state): (self._reader, self._writer, self._rlock, self._wlock) = state def get_payload(self): return self._reader.recv_bytes() def send_payload(self, value): self._writer.send_bytes(value) def get(self): # unserialize the data after having released the lock return ForkingPickler.loads(self.get_payload()) def put(self, obj): # serialize the data before acquiring the lock self.send_payload(ForkingPickler.dumps(obj)) class SimpleQueue(_SimpleQueue): def __init__(self, *args, **kwargs): try: ctx = kwargs['ctx'] except KeyError: raise TypeError('missing required keyword argument: ctx') self._reader, self._writer = connection.Pipe(duplex=False) self._rlock = ctx.Lock() self._wlock = ctx.Lock() if sys.platform != 'win32' else None def get_payload(self): with self._rlock: return self._reader.recv_bytes() def send_payload(self, value): if self._wlock is None: # writes to a message oriented win32 pipe are atomic self._writer.send_bytes(value) else: with self._wlock: self._writer.send_bytes(value) billiard-3.5.0.3/billiard/resource_sharer.py0000644000175000017500000001236113132743245020726 0ustar omeromer00000000000000# # We use a background thread for sharing fds on Unix, and for sharing # sockets on Windows. # # A client which wants to pickle a resource registers it with the resource # sharer and gets an identifier in return. The unpickling process will connect # to the resource sharer, sends the identifier and its pid, and then receives # the resource. 
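#
# Sketch of that round trip on Unix (illustrative only; `DupFd` is the helper
# defined below, and `some_fd` is a hypothetical file descriptor):
#
#     dup = DupFd(some_fd)    # parent: dups the fd and registers it,
#                             # keeping an (address, key) identifier
#     # ... pickle `dup` as part of an object sent to another process ...
#     fd = dup.detach()       # receiver: connects to the sharer thread
#                             # and gets a duplicate of the original fd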
# from __future__ import absolute_import import os import signal import socket import sys import threading from . import process from . import reduction from . import util __all__ = ['stop'] if sys.platform == 'win32': __all__ += ['DupSocket'] class DupSocket(object): '''Picklable wrapper for a socket.''' def __init__(self, sock): new_sock = sock.dup() def send(conn, pid): share = new_sock.share(pid) conn.send_bytes(share) self._id = _resource_sharer.register(send, new_sock.close) def detach(self): '''Get the socket. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: share = conn.recv_bytes() return socket.fromshare(share) else: __all__ += ['DupFd'] class DupFd(object): '''Wrapper for fd which can be used at any time.''' def __init__(self, fd): new_fd = os.dup(fd) def send(conn, pid): reduction.send_handle(conn, new_fd, pid) def close(): os.close(new_fd) self._id = _resource_sharer.register(send, close) def detach(self): '''Get the fd. This should only be called once.''' with _resource_sharer.get_connection(self._id) as conn: return reduction.recv_handle(conn) class _ResourceSharer(object): '''Manager for resouces using background thread.''' def __init__(self): self._key = 0 self._cache = {} self._old_locks = [] self._lock = threading.Lock() self._listener = None self._address = None self._thread = None util.register_after_fork(self, _ResourceSharer._afterfork) def register(self, send, close): '''Register resource, returning an identifier.''' with self._lock: if self._address is None: self._start() self._key += 1 self._cache[self._key] = (send, close) return (self._address, self._key) @staticmethod def get_connection(ident): '''Return connection from which to receive identified resource.''' from .connection import Client address, key = ident c = Client(address, authkey=process.current_process().authkey) c.send((key, os.getpid())) return c def stop(self, timeout=None): '''Stop the background thread and clear registered resources.''' from .connection import Client with self._lock: if self._address is not None: c = Client(self._address, authkey=process.current_process().authkey) c.send(None) c.close() self._thread.join(timeout) if self._thread.is_alive(): util.sub_warning('_ResourceSharer thread did ' 'not stop when asked') self._listener.close() self._thread = None self._address = None self._listener = None for key, (send, close) in self._cache.items(): close() self._cache.clear() def _afterfork(self): for key, (send, close) in self._cache.items(): close() self._cache.clear() # If self._lock was locked at the time of the fork, it may be broken # -- see issue 6721. Replace it without letting it be gc'ed. 
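        # (Appending the old lock to self._old_locks below is what keeps it
        # alive; a fresh threading.Lock then takes its place.)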
self._old_locks.append(self._lock) self._lock = threading.Lock() if self._listener is not None: self._listener.close() self._listener = None self._address = None self._thread = None def _start(self): from .connection import Listener assert self._listener is None util.debug('starting listener and thread for sending handles') self._listener = Listener(authkey=process.current_process().authkey) self._address = self._listener.address t = threading.Thread(target=self._serve) t.daemon = True t.start() self._thread = t def _serve(self): if hasattr(signal, 'pthread_sigmask'): signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG)) while 1: try: with self._listener.accept() as conn: msg = conn.recv() if msg is None: break key, destination_pid = msg send, close = self._cache.pop(key) try: send(conn, destination_pid) finally: close() except: if not util.is_exiting(): sys.excepthook(*sys.exc_info()) _resource_sharer = _ResourceSharer() stop = _resource_sharer.stop billiard-3.5.0.3/billiard/sharedctypes.py0000644000175000017500000001513613132743245020234 0ustar omeromer00000000000000# # Module which supports allocation of ctypes objects from shared memory # # multiprocessing/sharedctypes.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import ctypes import sys import weakref from . import heap from . import get_context from .context import assert_spawning from .five import int_types from .reduction import ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] PY3 = sys.version_info[0] == 3 typecode_to_type = { 'c': ctypes.c_char, 'u': ctypes.c_wchar, 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 'h': ctypes.c_short, 'H': ctypes.c_ushort, 'i': ctypes.c_int, 'I': ctypes.c_uint, 'l': ctypes.c_long, 'L': ctypes.c_ulong, 'f': ctypes.c_float, 'd': ctypes.c_double } def _new_value(type_): size = ctypes.sizeof(type_) wrapper = heap.BufferWrapper(size) return rebuild_ctype(type_, wrapper, None) def RawValue(typecode_or_type, *args): ''' Returns a ctypes object allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) obj.__init__(*args) return obj def RawArray(typecode_or_type, size_or_initializer): ''' Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) if isinstance(size_or_initializer, int_types): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) return obj else: type_ = type_ * len(size_or_initializer) result = _new_value(type_) result.__init__(*size_or_initializer) return result def Value(typecode_or_type, *args, **kwds): ''' Return a synchronization wrapper for a Value ''' lock = kwds.pop('lock', None) ctx = kwds.pop('ctx', None) if kwds: raise ValueError( 'unrecognized keyword argument(s): %s' % list(kwds.keys())) obj = RawValue(typecode_or_type, *args) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("'%r' has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def Array(typecode_or_type, size_or_initializer, **kwds): ''' Return a synchronization wrapper for a RawArray ''' lock = kwds.pop('lock', None) ctx = kwds.pop('ctx', None) if kwds: raise ValueError( 'unrecognized keyword argument(s): %s' % 
list(kwds.keys())) obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj if lock in (True, None): ctx = ctx or get_context() lock = ctx.RLock() if not hasattr(lock, 'acquire'): raise AttributeError("'%r' has no method 'acquire'" % lock) return synchronized(obj, lock, ctx=ctx) def copy(obj): new_obj = _new_value(type(obj)) ctypes.pointer(new_obj)[0] = obj return new_obj def synchronized(obj, lock=None, ctx=None): assert not isinstance(obj, SynchronizedBase), 'object already synchronized' ctx = ctx or get_context() if isinstance(obj, ctypes._SimpleCData): return Synchronized(obj, lock, ctx) elif isinstance(obj, ctypes.Array): if obj._type_ is ctypes.c_char: return SynchronizedString(obj, lock, ctx) return SynchronizedArray(obj, lock, ctx) else: cls = type(obj) try: scls = class_cache[cls] except KeyError: names = [field[0] for field in cls._fields_] d = dict((name, make_property(name)) for name in names) classname = 'Synchronized' + cls.__name__ scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) return scls(obj, lock, ctx) # # Functions for pickling/unpickling # def reduce_ctype(obj): assert_spawning(obj) if isinstance(obj, ctypes.Array): return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) else: return rebuild_ctype, (type(obj), obj._wrapper, None) def rebuild_ctype(type_, wrapper, length): if length is not None: type_ = type_ * length ForkingPickler.register(type_, reduce_ctype) if PY3: buf = wrapper.create_memoryview() obj = type_.from_buffer(buf) else: obj = type_.from_address(wrapper.get_address()) obj._wrapper = wrapper return obj # # Function to create properties # def make_property(name): try: return prop_cache[name] except KeyError: d = {} exec(template % ((name, ) * 7), d) prop_cache[name] = d[name] return d[name] template = ''' def get%s(self): self.acquire() try: return self._obj.%s finally: self.release() def set%s(self, value): self.acquire() try: self._obj.%s = value finally: self.release() %s = property(get%s, set%s) ''' prop_cache = {} class_cache = weakref.WeakKeyDictionary() # # Synchronized wrappers # class SynchronizedBase(object): def __init__(self, obj, lock=None, ctx=None): self._obj = obj if lock: self._lock = lock else: ctx = ctx or get_context(force=True) self._lock = ctx.RLock() self.acquire = self._lock.acquire self.release = self._lock.release def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def __reduce__(self): assert_spawning(self) return synchronized, (self._obj, self._lock) def get_obj(self): return self._obj def get_lock(self): return self._lock def __repr__(self): return '<%s wrapper for %s>' % (type(self).__name__, self._obj) class Synchronized(SynchronizedBase): value = make_property('value') class SynchronizedArray(SynchronizedBase): def __len__(self): return len(self._obj) def __getitem__(self, i): with self: return self._obj[i] def __setitem__(self, i, value): with self: self._obj[i] = value def __getslice__(self, start, stop): with self: return self._obj[start:stop] def __setslice__(self, start, stop, values): with self: self._obj[start:stop] = values class SynchronizedString(SynchronizedArray): value = make_property('value') raw = make_property('raw') billiard-3.5.0.3/billiard/popen_forkserver.py0000644000175000017500000000344513132743245021127 0ustar omeromer00000000000000from __future__ import absolute_import import io import os from . import reduction from . import context from . import forkserver from . import popen_fork from . 
import spawn __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, ind): self.ind = ind def detach(self): return forkserver.get_inherited_fds()[self.ind] # # Start child process using a server process # class Popen(popen_fork.Popen): method = 'forkserver' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super(Popen, self).__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return len(self._fds) - 1 def _launch(self, process_obj): prep_data = spawn.get_preparation_data(process_obj._name) buf = io.BytesIO() context.set_spawning_popen(self) try: reduction.dump(prep_data, buf) reduction.dump(process_obj, buf) finally: context.set_spawning_popen(None) self.sentinel, w = forkserver.connect_to_new_process(self._fds) with io.open(w, 'wb', closefd=True) as f: f.write(buf.getbuffer()) self.pid = forkserver.read_unsigned(self.sentinel) def poll(self, flag=os.WNOHANG): if self.returncode is None: from .connection import wait timeout = 0 if flag == os.WNOHANG else None if not wait([self.sentinel], timeout): return None try: self.returncode = forkserver.read_unsigned(self.sentinel) except (OSError, EOFError): # The process ended abnormally perhaps because of a signal self.returncode = 255 return self.returncode billiard-3.5.0.3/billiard/dummy/0000755000175000017500000000000013132746522016312 5ustar omeromer00000000000000billiard-3.5.0.3/billiard/dummy/connection.py0000644000175000017500000000561213132743245021026 0ustar omeromer00000000000000# # Analogue of `multiprocessing.connection` which uses queues instead of sockets # # multiprocessing/dummy/connection.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
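#
# Editor's note (illustrative sketch, not part of the original module): this
# module mirrors the socket based connection API with plain queues, so a
# Pipe() created here is just a pair of in-process queues and objects are
# handed over by reference, without pickling.  A minimal, hedged usage
# sketch; the variable names are local to this example only:
#
#     from billiard.dummy.connection import Pipe
#
#     parent_end, child_end = Pipe()
#     child_end.send({'msg': 'hello'})       # plain Queue.put(), no pickling
#     if parent_end.poll(timeout=1.0):
#         print(parent_end.recv())           # -> {'msg': 'hello'}
#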
# from __future__ import absolute_import from billiard.five import Queue __all__ = ['Client', 'Listener', 'Pipe'] families = [None] class Listener(object): def __init__(self, address=None, family=None, backlog=1): self._backlog_queue = Queue(backlog) def accept(self): return Connection(*self._backlog_queue.get()) def close(self): self._backlog_queue = None address = property(lambda self: self._backlog_queue) def __enter__(self): return self def __exit__(self, *exc_info): self.close() def Client(address): _in, _out = Queue(), Queue() address.put((_out, _in)) return Connection(_in, _out) def Pipe(duplex=True): a, b = Queue(), Queue() return Connection(a, b), Connection(b, a) class Connection(object): def __init__(self, _in, _out): self._out = _out self._in = _in self.send = self.send_bytes = _out.put self.recv = self.recv_bytes = _in.get def poll(self, timeout=0.0): if self._in.qsize() > 0: return True if timeout <= 0.0: return False self._in.not_empty.acquire() self._in.not_empty.wait(timeout) self._in.not_empty.release() return self._in.qsize() > 0 def close(self): pass billiard-3.5.0.3/billiard/dummy/__init__.py0000644000175000017500000001111013132743245020414 0ustar omeromer00000000000000# # Support for the API of the multiprocessing package using threads # # multiprocessing/dummy/__init__.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
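#
# Editor's note (illustrative sketch, not part of the original module): this
# package re-exposes the multiprocessing style API on top of threads, so a
# Process created here is a DummyProcess thread.  A hedged usage sketch,
# assuming billiard.pool.ThreadPool (returned by Pool() below) keeps the
# familiar map()/close()/join() interface:
#
#     from billiard import dummy
#
#     def double(x):
#         return x * 2
#
#     p = dummy.Process(target=double, args=(21,))
#     p.start()
#     p.join()
#
#     pool = dummy.Pool(4)                   # thread-backed pool
#     try:
#         print(pool.map(double, range(8)))
#     finally:
#         pool.close()
#         pool.join()
#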
# from __future__ import absolute_import # # Imports # import threading import sys import weakref import array from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event from billiard.five import Queue from billiard.connection import Pipe __all__ = [ 'Process', 'current_process', 'active_children', 'freeze_support', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' ] class DummyProcess(threading.Thread): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): threading.Thread.__init__(self, group, target, name, args, kwargs) self._pid = None self._children = weakref.WeakKeyDictionary() self._start_called = False self._parent = current_process() def start(self): assert self._parent is current_process() self._start_called = True if hasattr(self._parent, '_children'): self._parent._children[self] = None threading.Thread.start(self) @property def exitcode(self): if self._start_called and not self.is_alive(): return 0 else: return None try: _Condition = threading._Condition except AttributeError: # Py3 _Condition = threading.Condition # noqa class Condition(_Condition): if sys.version_info[0] == 3: notify_all = _Condition.notifyAll else: notify_all = _Condition.notifyAll.__func__ Process = DummyProcess current_process = threading.current_thread current_process()._children = weakref.WeakKeyDictionary() def active_children(): children = current_process()._children for p in list(children): if not p.is_alive(): children.pop(p, None) return list(children) def freeze_support(): pass class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return '%s(%s)' % (self.__class__.__name__, str.join(', ', temp)) dict = dict list = list def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def _get(self): return self._value def _set(self, value): self._value = value value = property(_get, _set) def __repr__(self): return '<%r(%r, %r)>' % (type(self).__name__, self._typecode, self._value) def Manager(): return sys.modules[__name__] def shutdown(): pass def Pool(processes=None, initializer=None, initargs=()): from billiard.pool import ThreadPool return ThreadPool(processes, initializer, initargs) JoinableQueue = Queue billiard-3.5.0.3/billiard/synchronize.py0000644000175000017500000003136113132743245020107 0ustar omeromer00000000000000# # Module implementing synchronization primitives # # multiprocessing/synchronize.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import errno import sys import tempfile import threading from . import context from . import process from . import util from ._ext import _billiard, ensure_SemLock from .five import range, monotonic __all__ = [ 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', ] # Try to import the mp.synchronize module cleanly, if it fails # raise ImportError for platforms lacking a working sem_open implementation. 
# See issue 3770 ensure_SemLock() # # Constants # RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX try: sem_unlink = _billiard.SemLock.sem_unlink except AttributeError: # pragma: no cover try: # Py3.4+ implements sem_unlink and the semaphore must be named from _multiprocessing import sem_unlink # noqa except ImportError: sem_unlink = None # noqa # # Base class for semaphores and mutexes; wraps `_billiard.SemLock` # def _semname(sl): try: return sl.name except AttributeError: pass class SemLock(object): _rand = tempfile._RandomNameSequence() def __init__(self, kind, value, maxvalue, ctx=None): if ctx is None: ctx = context._default_context.get_context() name = ctx.get_start_method() unlink_now = sys.platform == 'win32' or name == 'fork' if sem_unlink: for i in range(100): try: sl = self._semlock = _billiard.SemLock( kind, value, maxvalue, self._make_name(), unlink_now, ) except (OSError, IOError) as exc: if getattr(exc, 'errno', None) != errno.EEXIST: raise else: break else: exc = IOError('cannot find file for semaphore') exc.errno = errno.EEXIST raise exc else: sl = self._semlock = _billiard.SemLock(kind, value, maxvalue) util.debug('created semlock with handle %s', sl.handle) self._make_methods() if sem_unlink: if sys.platform != 'win32': def _after_fork(obj): obj._semlock._after_fork() util.register_after_fork(self, _after_fork) if _semname(self._semlock) is not None: # We only get here if we are on Unix with forking # disabled. When the object is garbage collected or the # process shuts down we unlink the semaphore name from .semaphore_tracker import register register(self._semlock.name) util.Finalize(self, SemLock._cleanup, (self._semlock.name,), exitpriority=0) @staticmethod def _cleanup(name): from .semaphore_tracker import unregister sem_unlink(name) unregister(name) def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release def __enter__(self): return self._semlock.__enter__() def __exit__(self, *args): return self._semlock.__exit__(*args) def __getstate__(self): context.assert_spawning(self) sl = self._semlock if sys.platform == 'win32': h = context.get_spawning_popen().duplicate_for_child(sl.handle) else: h = sl.handle state = (h, sl.kind, sl.maxvalue) try: state += (sl.name, ) except AttributeError: pass return state def __setstate__(self, state): self._semlock = _billiard.SemLock._rebuild(*state) util.debug('recreated blocker with handle %r', state[0]) self._make_methods() @staticmethod def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand)) class Semaphore(SemLock): def __init__(self, value=1, ctx=None): SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) def get_value(self): return self._semlock._get_value() def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s)>' % (self.__class__.__name__, value) class BoundedSemaphore(Semaphore): def __init__(self, value=1, ctx=None): SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) def __repr__(self): try: value = self._semlock._get_value() except Exception: value = 'unknown' return '<%s(value=%s, maxvalue=%s)>' % ( self.__class__.__name__, value, self._semlock.maxvalue) class Lock(SemLock): ''' Non-recursive lock. 
''' def __init__(self, ctx=None): SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name elif self._semlock._get_value() == 1: name = 'None' elif self._semlock._count() > 0: name = 'SomeOtherThread' else: name = 'SomeOtherProcess' except Exception: name = 'unknown' return '<%s(owner=%s)>' % (self.__class__.__name__, name) class RLock(SemLock): ''' Recursive lock ''' def __init__(self, ctx=None): SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) def __repr__(self): try: if self._semlock._is_mine(): name = process.current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name count = self._semlock._count() elif self._semlock._get_value() == 1: name, count = 'None', 0 elif self._semlock._count() > 0: name, count = 'SomeOtherThread', 'nonzero' else: name, count = 'SomeOtherProcess', 'nonzero' except Exception: name, count = 'unknown', 'unknown' return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) class Condition(object): ''' Condition variable ''' def __init__(self, lock=None, ctx=None): assert ctx self._lock = lock or ctx.RLock() self._sleeping_count = ctx.Semaphore(0) self._woken_count = ctx.Semaphore(0) self._wait_semaphore = ctx.Semaphore(0) self._make_methods() def __getstate__(self): context.assert_spawning(self) return (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) def __setstate__(self, state): (self._lock, self._sleeping_count, self._woken_count, self._wait_semaphore) = state self._make_methods() def __enter__(self): return self._lock.__enter__() def __exit__(self, *args): return self._lock.__exit__(*args) def _make_methods(self): self.acquire = self._lock.acquire self.release = self._lock.release def __repr__(self): try: num_waiters = (self._sleeping_count._semlock._get_value() - self._woken_count._semlock._get_value()) except Exception: num_waiters = 'unknown' return '<%s(%s, %s)>' % ( self.__class__.__name__, self._lock, num_waiters) def wait(self, timeout=None): assert self._lock._semlock._is_mine(), \ 'must acquire() condition before using wait()' # indicate that this thread is going to sleep self._sleeping_count.release() # release lock count = self._lock._semlock._count() for i in range(count): self._lock.release() try: # wait for notification or timeout return self._wait_semaphore.acquire(True, timeout) finally: # indicate that this thread has woken self._woken_count.release() # reacquire lock for i in range(count): self._lock.acquire() def notify(self): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire(False) # to take account of timeouts since last notify() we subtract # woken_count from sleeping_count and rezero woken_count while self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res if self._sleeping_count.acquire(False): # try grabbing a sleeper self._wait_semaphore.release() # wake up one sleeper self._woken_count.acquire() # wait for sleeper to wake # rezero _wait_semaphore in case a timeout just happened self._wait_semaphore.acquire(False) def notify_all(self): assert self._lock._semlock._is_mine(), 'lock is not owned' assert not self._wait_semaphore.acquire(False) # to take account of timeouts since last notify*() we subtract # woken_count from sleeping_count and rezero woken_count while 
self._woken_count.acquire(False): res = self._sleeping_count.acquire(False) assert res sleepers = 0 while self._sleeping_count.acquire(False): self._wait_semaphore.release() # wake up one sleeper sleepers += 1 if sleepers: for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened while self._wait_semaphore.acquire(False): pass def wait_for(self, predicate, timeout=None): result = predicate() if result: return result if timeout is not None: endtime = monotonic() + timeout else: endtime = None waittime = None while not result: if endtime is not None: waittime = endtime - monotonic() if waittime <= 0: break self.wait(waittime) result = predicate() return result class Event(object): def __init__(self, ctx=None): assert ctx self._cond = ctx.Condition(ctx.Lock()) self._flag = ctx.Semaphore(0) def is_set(self): with self._cond: if self._flag.acquire(False): self._flag.release() return True return False def set(self): with self._cond: self._flag.acquire(False) self._flag.release() self._cond.notify_all() def clear(self): with self._cond: self._flag.acquire(False) def wait(self, timeout=None): with self._cond: if self._flag.acquire(False): self._flag.release() else: self._cond.wait(timeout) if self._flag.acquire(False): self._flag.release() return True return False # # Barrier # if hasattr(threading, 'Barrier'): class Barrier(threading.Barrier): def __init__(self, parties, action=None, timeout=None, ctx=None): assert ctx import struct from .heap import BufferWrapper wrapper = BufferWrapper(struct.calcsize('i') * 2) cond = ctx.Condition() self.__setstate__((parties, action, timeout, cond, wrapper)) self._state = 0 self._count = 0 def __setstate__(self, state): (self._parties, self._action, self._timeout, self._cond, self._wrapper) = state self._array = self._wrapper.create_memoryview().cast('i') def __getstate__(self): return (self._parties, self._action, self._timeout, self._cond, self._wrapper) @property def _state(self): return self._array[0] @_state.setter def _state(self, value): # noqa self._array[0] = value @property def _count(self): return self._array[1] @_count.setter def _count(self, value): # noqa self._array[1] = value else: class Barrier(object): # noqa def __init__(self, *args, **kwargs): raise NotImplementedError('Barrier only supported on Py3') billiard-3.5.0.3/billiard/einfo.py0000644000175000017500000000707413132743245016640 0ustar omeromer00000000000000from __future__ import absolute_import import sys import traceback __all__ = ['ExceptionInfo', 'Traceback'] DEFAULT_MAX_FRAMES = sys.getrecursionlimit() // 8 class _Code(object): def __init__(self, code): self.co_filename = code.co_filename self.co_name = code.co_name self.co_argcount = code.co_argcount self.co_cellvars = () self.co_firstlineno = code.co_firstlineno self.co_flags = code.co_flags self.co_freevars = () self.co_code = b'' self.co_lnotab = b'' self.co_names = code.co_names self.co_nlocals = code.co_nlocals self.co_stacksize = code.co_stacksize self.co_varnames = () class _Frame(object): Code = _Code def __init__(self, frame): self.f_builtins = {} self.f_globals = { "__file__": frame.f_globals.get("__file__", "__main__"), "__name__": frame.f_globals.get("__name__"), "__loader__": None, } self.f_locals = fl = {} try: fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"] except KeyError: pass self.f_trace = None self.f_exc_traceback = None self.f_exc_type = None self.f_exc_value = None self.f_code = self.Code(frame.f_code) 
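        # Editor's note: only primitive, picklable attributes of the real
        # frame are copied here; f_globals/f_locals were replaced above with
        # small plain dicts and f_code with the _Code shim, so the resulting
        # traceback can be pickled and sent to another process without
        # dragging live objects along.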
self.f_lineno = frame.f_lineno self.f_lasti = frame.f_lasti # don't want to hit https://bugs.python.org/issue21967 self.f_restricted = False class _Object(object): def __init__(self, **kw): [setattr(self, k, v) for k, v in kw.items()] class _Truncated(object): def __init__(self): self.tb_lineno = -1 self.tb_frame = _Object( f_globals={"__file__": "", "__name__": "", "__loader__": None}, f_fileno=None, f_code=_Object(co_filename="...", co_name="[rest of traceback truncated]"), ) self.tb_next = None self.tb_lasti = 0 class Traceback(object): Frame = _Frame def __init__(self, tb, max_frames=DEFAULT_MAX_FRAMES, depth=0): self.tb_frame = self.Frame(tb.tb_frame) self.tb_lineno = tb.tb_lineno self.tb_lasti = tb.tb_lasti self.tb_next = None if tb.tb_next is not None: if depth <= max_frames: self.tb_next = Traceback(tb.tb_next, max_frames, depth + 1) else: self.tb_next = _Truncated() class ExceptionInfo(object): """Exception wrapping an exception and its traceback. :param exc_info: The exception info tuple as returned by :func:`sys.exc_info`. """ #: Exception type. type = None #: Exception instance. exception = None #: Pickleable traceback instance for use with :mod:`traceback` tb = None #: String representation of the traceback. traceback = None #: Set to true if this is an internal error. internal = False def __init__(self, exc_info=None, internal=False): self.type, self.exception, tb = exc_info or sys.exc_info() try: self.tb = Traceback(tb) self.traceback = ''.join( traceback.format_exception(self.type, self.exception, tb), ) self.internal = internal finally: del(tb) def __str__(self): return self.traceback def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.exception, ) @property def exc_info(self): return self.type, self.exception, self.tb billiard-3.5.0.3/billiard/heap.py0000644000175000017500000002170713132743245016454 0ustar omeromer00000000000000# # Module which supports allocation of memory from an mmap # # multiprocessing/heap.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import bisect import errno import io import mmap import os import sys import threading import tempfile from . import context from . import reduction from . import util from ._ext import _billiard, win32 __all__ = ['BufferWrapper'] PY3 = sys.version_info[0] == 3 # # Inheritable class which wraps an mmap, and from which blocks can be allocated # if sys.platform == 'win32': class Arena(object): _rand = tempfile._RandomNameSequence() def __init__(self, size): self.size = size for i in range(100): name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) buf = mmap.mmap(-1, size, tagname=name) if win32.GetLastError() == 0: break # we have reopened a preexisting map buf.close() else: exc = IOError('Cannot find name for new mmap') exc.errno = errno.EEXIST raise exc self.name = name self.buffer = buf self._state = (self.size, self.name) def __getstate__(self): context.assert_spawning(self) return self._state def __setstate__(self, state): self.size, self.name = self._state = state self.buffer = mmap.mmap(-1, self.size, tagname=self.name) # XXX Temporarily preventing buildbot failures while determining # XXX the correct long-term fix. 
See issue #23060 # assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS else: class Arena(object): def __init__(self, size, fd=-1): self.size = size self.fd = fd if fd == -1: self.fd, name = tempfile.mkstemp( prefix='pym-%d-' % (os.getpid(), ), dir=util.get_temp_dir(), ) if PY3: os.unlink(name) util.Finalize(self, os.close, (self.fd,)) with io.open(self.fd, 'wb', closefd=False) as f: bs = 1024 * 1024 if size >= bs: zeros = b'\0' * bs for _ in range(size // bs): f.write(zeros) del(zeros) f.write(b'\0' * (size % bs)) assert f.tell() == size else: self.fd = os.open( name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600, ) util.Finalize(self, os.close, (self.fd,)) os.unlink(name) os.ftruncate(self.fd, size) self.buffer = mmap.mmap(self.fd, self.size) def reduce_arena(a): if a.fd == -1: raise ValueError('Arena is unpicklable because' 'forking was enabled when it was created') return rebuild_arena, (a.size, reduction.DupFd(a.fd)) def rebuild_arena(size, dupfd): return Arena(size, dupfd.detach()) reduction.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas # class Heap(object): _alignment = 8 def __init__(self, size=mmap.PAGESIZE): self._lastpid = os.getpid() self._lock = threading.Lock() self._size = size self._lengths = [] self._len_to_seq = {} self._start_to_block = {} self._stop_to_block = {} self._allocated_blocks = set() self._arenas = [] # list of pending blocks to free - see free() comment below self._pending_free_blocks = [] @staticmethod def _roundup(n, alignment): # alignment must be a power of 2 mask = alignment - 1 return (n + mask) & ~mask def _malloc(self, size): # returns a large enough block -- it might be much larger i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): length = self._roundup(max(self._size, size), mmap.PAGESIZE) self._size *= 2 util.info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] return block def _free(self, block): # free location and try to merge with neighbours (arena, start, stop) = block try: prev_block = self._stop_to_block[(arena, start)] except KeyError: pass else: start, _ = self._absorb(prev_block) try: next_block = self._start_to_block[(arena, stop)] except KeyError: pass else: _, stop = self._absorb(next_block) block = (arena, start, stop) length = stop - start try: self._len_to_seq[length].append(block) except KeyError: self._len_to_seq[length] = [block] bisect.insort(self._lengths, length) self._start_to_block[(arena, start)] = block self._stop_to_block[(arena, stop)] = block def _absorb(self, block): # deregister this block so it can be merged with a neighbour (arena, start, stop) = block del self._start_to_block[(arena, start)] del self._stop_to_block[(arena, stop)] length = stop - start seq = self._len_to_seq[length] seq.remove(block) if not seq: del self._len_to_seq[length] self._lengths.remove(length) return start, stop def _free_pending_blocks(self): # Free all the blocks in the pending list - called with the lock held while 1: try: block = self._pending_free_blocks.pop() except IndexError: break self._allocated_blocks.remove(block) self._free(block) def free(self, block): # free a block returned by malloc() # Since free() can be called asynchronously by the GC, 
it could happen # that it's called while self._lock is held: in that case, # self._lock.acquire() would deadlock (issue #12352). To avoid that, a # trylock is used instead, and if the lock can't be acquired # immediately, the block is added to a list of blocks to be freed # synchronously sometimes later from malloc() or free(), by calling # _free_pending_blocks() (appending and retrieving from a list is not # strictly thread-safe but under cPython it's atomic # thanks to the GIL). assert os.getpid() == self._lastpid if not self._lock.acquire(False): # can't acquire the lock right now, add the block to the list of # pending blocks to free self._pending_free_blocks.append(block) else: # we hold the lock try: self._free_pending_blocks() self._allocated_blocks.remove(block) self._free(block) finally: self._lock.release() def malloc(self, size): # return a block of right size (possibly rounded up) assert 0 <= size < sys.maxsize if os.getpid() != self._lastpid: self.__init__() # reinitialize after fork with self._lock: self._free_pending_blocks() size = self._roundup(max(size, 1), self._alignment) (arena, start, stop) = self._malloc(size) new_stop = start + size if new_stop < stop: self._free((arena, new_stop, stop)) block = (arena, start, new_stop) self._allocated_blocks.add(block) return block # # Class representing a chunk of an mmap -- can be inherited # class BufferWrapper(object): _heap = Heap() def __init__(self, size): assert 0 <= size < sys.maxsize block = BufferWrapper._heap.malloc(size) self._state = (block, size) util.Finalize(self, BufferWrapper._heap.free, args=(block,)) def get_address(self): (arena, start, stop), size = self._state address, length = _billiard.address_of_buffer(arena.buffer) assert size <= length return address + start def get_size(self): return self._state[1] def create_memoryview(self): (arena, start, stop), size = self._state return memoryview(arena.buffer)[start:start + size] billiard-3.5.0.3/billiard/spawn.py0000644000175000017500000002662013132743245016666 0ustar omeromer00000000000000# # Code used to start processes when using the spawn or forkserver # start methods. # # multiprocessing/spawn.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import io import os import pickle import sys import runpy import types import warnings from . import get_start_method, set_start_method from . import process from . import util __all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', 'get_preparation_data', 'get_command_line', 'import_main_path'] W_OLD_DJANGO_LAYOUT = """\ Will add directory %r to path! This is necessary to accommodate \ pre-Django 1.4 layouts using setup_environ. You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \ environment variable. """ # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
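# Editor's note (illustrative): embedders normally go through the public
# helpers defined further down in this module rather than rebinding
# _python_exe directly; the path below is hypothetical:
#
#     from billiard import spawn
#
#     spawn.set_executable('/opt/myapp/bin/embedded-python')
#     assert spawn.get_executable() == '/opt/myapp/bin/embedded-python'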
# if sys.platform != 'win32': WINEXE = False WINSERVICE = False else: WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def _module_parent_dir(mod): dir, filename = os.path.split(_module_dir(mod)) if dir == os.curdir or not dir: dir = os.getcwd() return dir def _module_dir(mod): if '__init__.py' in mod.__file__: return os.path.dirname(mod.__file__) return mod.__file__ def _Django_old_layout_hack__save(): if 'DJANGO_PROJECT_DIR' not in os.environ: try: settings_name = os.environ['DJANGO_SETTINGS_MODULE'] except KeyError: return # not using Django. conf_settings = sys.modules.get('django.conf.settings') configured = conf_settings and conf_settings.configured try: project_name, _ = settings_name.split('.', 1) except ValueError: return # not modified by setup_environ project = __import__(project_name) try: project_dir = os.path.normpath(_module_parent_dir(project)) except AttributeError: return # dynamically generated module (no __file__) if configured: warnings.warn(UserWarning( W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir) )) os.environ['DJANGO_PROJECT_DIR'] = project_dir def _Django_old_layout_hack__load(): try: sys.path.append(os.environ['DJANGO_PROJECT_DIR']) except KeyError: pass def set_executable(exe): global _python_exe _python_exe = exe def get_executable(): return _python_exe # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--billiard-fork': return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn_main(**kwds) sys.exit() def get_command_line(**kwds): ''' Returns prefix of command line used for spawning a child process ''' if getattr(sys, 'frozen', False): return ([sys.executable, '--billiard-fork'] + ['%s=%r' % item for item in kwds.items()]) else: prog = 'from billiard.spawn import spawn_main; spawn_main(%s)' prog %= ', '.join('%s=%r' % item for item in kwds.items()) opts = util._args_from_interpreter_flags() return [_python_exe] + opts + ['-c', prog, '--billiard-fork'] def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): ''' Run code specified by data received over pipe ''' assert is_forking(sys.argv) if sys.platform == 'win32': import msvcrt from .reduction import steal_handle new_handle = steal_handle(parent_pid, pipe_handle) fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) else: from . import semaphore_tracker semaphore_tracker._semaphore_tracker._fd = tracker_fd fd = pipe_handle exitcode = _main(fd) sys.exit(exitcode) def _setup_logging_in_child_hack(): # Huge hack to make logging before Process.run work. try: os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__ except KeyError: pass except AttributeError: pass loglevel = os.environ.get("_MP_FORK_LOGLEVEL_") logfile = os.environ.get("_MP_FORK_LOGFILE_") or None format = os.environ.get("_MP_FORK_LOGFORMAT_") if loglevel: from . 
import util import logging logger = util.get_logger() logger.setLevel(int(loglevel)) if not logger.handlers: logger._rudimentary_setup = True logfile = logfile or sys.__stderr__ if hasattr(logfile, "write"): handler = logging.StreamHandler(logfile) else: handler = logging.FileHandler(logfile) formatter = logging.Formatter( format or util.DEFAULT_LOGGING_FORMAT, ) handler.setFormatter(formatter) logger.addHandler(handler) def _main(fd): _Django_old_layout_hack__load() with io.open(fd, 'rb', closefd=True) as from_parent: process.current_process()._inheriting = True try: preparation_data = pickle.load(from_parent) prepare(preparation_data) _setup_logging_in_child_hack() self = pickle.load(from_parent) finally: del process.current_process()._inheriting return self._bootstrap() def _check_not_importing_main(): if getattr(process.current_process(), '_inheriting', False): raise RuntimeError(''' An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable.''') def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' _check_not_importing_main() d = dict( log_to_stderr=util._log_to_stderr, authkey=process.current_process().authkey, ) if util._logger is not None: d['log_level'] = util._logger.getEffectiveLevel() sys_path = sys.path[:] try: i = sys_path.index('') except ValueError: pass else: sys_path[i] = process.ORIGINAL_DIR d.update( name=name, sys_path=sys_path, sys_argv=sys.argv, orig_dir=process.ORIGINAL_DIR, dir=os.getcwd(), start_method=get_start_method(), ) # Figure out whether to initialise main in the subprocess as a module # or through direct execution (or to leave it alone entirely) main_module = sys.modules['__main__'] try: main_mod_name = main_module.__spec__.name except AttributeError: main_mod_name = main_module.__name__ if main_mod_name is not None: d['init_main_from_name'] = main_mod_name elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): main_path = getattr(main_module, '__file__', None) if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['init_main_from_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process().authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'start_method' in data: set_start_method(data['start_method']) if 'init_main_from_name' in data: _fixup_main_from_name(data['init_main_from_name']) elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def 
_fixup_main_from_name(mod_name): # __main__.py files for packages, directories, zip archives, etc, run # their "main only" code unconditionally, so we don't even try to # populate anything in __main__, nor do we make any changes to # __main__ attributes current_main = sys.modules['__main__'] if mod_name == "__main__" or mod_name.endswith(".__main__"): return # If this process was forked, __main__ may already be populated if getattr(current_main.__spec__, "name", None) == mod_name: return # Otherwise, __main__ may contain some non-main code where we need to # support unpickling it properly. We rerun it as __mp_main__ and make # the normal __main__ an alias to that old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_module(mod_name, run_name="__mp_main__", alter_sys=True) main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def _fixup_main_from_path(main_path): # If this process was forked, __main__ may already be populated current_main = sys.modules['__main__'] # Unfortunately, the main ipython launch script historically had no # "if __name__ == '__main__'" guard, so we work around that # by treating it like a __main__.py file # See https://github.com/ipython/ipython/issues/4698 main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == 'ipython': return # Otherwise, if __file__ already has the setting we expect, # there's nothing more to do if getattr(current_main, '__file__', None) == main_path: return # If the parent process has sent a path through rather than a module # name we assume it is an executable script that may contain # non-main code that needs to be executed old_main_modules.append(current_main) main_module = types.ModuleType("__mp_main__") main_content = runpy.run_path(main_path, run_name="__mp_main__") main_module.__dict__.update(main_content) sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module def import_main_path(main_path): ''' Set sys.modules['__main__'] to module at main_path ''' _fixup_main_from_path(main_path) billiard-3.5.0.3/billiard/_win.py0000644000175000017500000000564613132743245016477 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """ billiard._win ~~~~~~~~~~~~~ Windows utilities to terminate process groups. """ from __future__ import absolute_import import os # psutil is painfully slow in win32. 
So to avoid adding big # dependencies like pywin32 a ctypes based solution is preferred # Code based on the winappdbg project http://winappdbg.sourceforge.net/ # (BSD License) from ctypes import ( byref, sizeof, windll, Structure, WinError, POINTER, c_size_t, c_char, c_void_p, ) from ctypes.wintypes import DWORD, LONG ERROR_NO_MORE_FILES = 18 INVALID_HANDLE_VALUE = c_void_p(-1).value class PROCESSENTRY32(Structure): _fields_ = [ ('dwSize', DWORD), ('cntUsage', DWORD), ('th32ProcessID', DWORD), ('th32DefaultHeapID', c_size_t), ('th32ModuleID', DWORD), ('cntThreads', DWORD), ('th32ParentProcessID', DWORD), ('pcPriClassBase', LONG), ('dwFlags', DWORD), ('szExeFile', c_char * 260), ] LPPROCESSENTRY32 = POINTER(PROCESSENTRY32) def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0): hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags, th32ProcessID) if hSnapshot == INVALID_HANDLE_VALUE: raise WinError() return hSnapshot def Process32First(hSnapshot, pe=None): return _Process32n(windll.kernel32.Process32First, hSnapshot, pe) def Process32Next(hSnapshot, pe=None): return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe) def _Process32n(fun, hSnapshot, pe=None): if pe is None: pe = PROCESSENTRY32() pe.dwSize = sizeof(PROCESSENTRY32) success = fun(hSnapshot, byref(pe)) if not success: if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES: return raise WinError() return pe def get_all_processes_pids(): """Return a dictionary with all processes pids as keys and their parents as value. Ignore processes with no parents. """ h = CreateToolhelp32Snapshot() parents = {} pe = Process32First(h) while pe: if pe.th32ParentProcessID: parents[pe.th32ProcessID] = pe.th32ParentProcessID pe = Process32Next(h, pe) return parents def get_processtree_pids(pid, include_parent=True): """Return a list with all the pids of a process tree""" parents = get_all_processes_pids() all_pids = list(parents.keys()) pids = {pid} while 1: pids_new = pids.copy() for _pid in all_pids: if parents[_pid] in pids: pids_new.add(_pid) if pids_new == pids: break pids = pids_new.copy() if not include_parent: pids.remove(pid) return list(pids) def kill_processtree(pid, signum): """Kill a process and all its descendants""" family_pids = get_processtree_pids(pid) for _pid in family_pids: os.kill(_pid, signum) billiard-3.5.0.3/billiard/context.py0000644000175000017500000003162213132743245017220 0ustar omeromer00000000000000from __future__ import absolute_import import os import sys import threading import warnings from . 
import process __all__ = [] # things are copied from here to __init__.py W_NO_EXECV = """\ force_execv is not supported as the billiard C extension \ is not installed\ """ # # Exceptions # from .exceptions import ( # noqa ProcessError, BufferTooShort, TimeoutError, AuthenticationError, TimeLimitExceeded, SoftTimeLimitExceeded, WorkerLostError, ) # # Base type for contexts # class BaseContext(object): ProcessError = ProcessError BufferTooShort = BufferTooShort TimeoutError = TimeoutError AuthenticationError = AuthenticationError TimeLimitExceeded = TimeLimitExceeded SoftTimeLimitExceeded = SoftTimeLimitExceeded WorkerLostError = WorkerLostError current_process = staticmethod(process.current_process) active_children = staticmethod(process.active_children) if hasattr(os, 'cpu_count'): def cpu_count(self): '''Returns the number of CPUs in the system''' num = os.cpu_count() if num is None: raise NotImplementedError('cannot determine number of cpus') else: return num else: def cpu_count(self): # noqa if sys.platform == 'win32': try: num = int(os.environ['NUMBER_OF_PROCESSORS']) except (ValueError, KeyError): num = 0 elif 'bsd' in sys.platform or sys.platform == 'darwin': comm = '/sbin/sysctl -n hw.ncpu' if sys.platform == 'darwin': comm = '/usr' + comm try: with os.popen(comm) as p: num = int(p.read()) except ValueError: num = 0 else: try: num = os.sysconf('SC_NPROCESSORS_ONLN') except (ValueError, OSError, AttributeError): num = 0 if num >= 1: return num else: raise NotImplementedError('cannot determine number of cpus') def Manager(self): '''Returns a manager associated with a running server process The managers methods such as `Lock()`, `Condition()` and `Queue()` can be used to create shared objects. ''' from .managers import SyncManager m = SyncManager(ctx=self.get_context()) m.start() return m def Pipe(self, duplex=True, rnonblock=False, wnonblock=False): '''Returns two connection object connected by a pipe''' from .connection import Pipe return Pipe(duplex, rnonblock, wnonblock) def Lock(self): '''Returns a non-recursive lock object''' from .synchronize import Lock return Lock(ctx=self.get_context()) def RLock(self): '''Returns a recursive lock object''' from .synchronize import RLock return RLock(ctx=self.get_context()) def Condition(self, lock=None): '''Returns a condition object''' from .synchronize import Condition return Condition(lock, ctx=self.get_context()) def Semaphore(self, value=1): '''Returns a semaphore object''' from .synchronize import Semaphore return Semaphore(value, ctx=self.get_context()) def BoundedSemaphore(self, value=1): '''Returns a bounded semaphore object''' from .synchronize import BoundedSemaphore return BoundedSemaphore(value, ctx=self.get_context()) def Event(self): '''Returns an event object''' from .synchronize import Event return Event(ctx=self.get_context()) def Barrier(self, parties, action=None, timeout=None): '''Returns a barrier object''' from .synchronize import Barrier return Barrier(parties, action, timeout, ctx=self.get_context()) def Queue(self, maxsize=0): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, ctx=self.get_context()) def JoinableQueue(self, maxsize=0): '''Returns a queue object''' from .queues import JoinableQueue return JoinableQueue(maxsize, ctx=self.get_context()) def SimpleQueue(self): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(ctx=self.get_context()) def Pool(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, timeout=None, soft_timeout=None, 
lost_worker_timeout=None, max_restarts=None, max_restart_freq=1, on_process_up=None, on_process_down=None, on_timeout_set=None, on_timeout_cancel=None, threads=True, semaphore=None, putlocks=False, allow_restart=False): '''Returns a process pool object''' from .pool import Pool return Pool(processes, initializer, initargs, maxtasksperchild, timeout, soft_timeout, lost_worker_timeout, max_restarts, max_restart_freq, on_process_up, on_process_down, on_timeout_set, on_timeout_cancel, threads, semaphore, putlocks, allow_restart, context=self.get_context()) def RawValue(self, typecode_or_type, *args): '''Returns a shared object''' from .sharedctypes import RawValue return RawValue(typecode_or_type, *args) def RawArray(self, typecode_or_type, size_or_initializer): '''Returns a shared array''' from .sharedctypes import RawArray return RawArray(typecode_or_type, size_or_initializer) def Value(self, typecode_or_type, *args, **kwargs): '''Returns a synchronized shared object''' from .sharedctypes import Value lock = kwargs.get('lock', True) return Value(typecode_or_type, *args, lock=lock, ctx=self.get_context()) def Array(self, typecode_or_type, size_or_initializer, *args, **kwargs): '''Returns a synchronized shared array''' from .sharedctypes import Array lock = kwargs.get('lock', True) return Array(typecode_or_type, size_or_initializer, lock=lock, ctx=self.get_context()) def freeze_support(self): '''Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit. ''' if sys.platform == 'win32' and getattr(sys, 'frozen', False): from .spawn import freeze_support freeze_support() def get_logger(self): '''Return package logger -- if it does not already exist then it is created. ''' from .util import get_logger return get_logger() def log_to_stderr(self, level=None): '''Turn on logging and add a handler which prints to stderr''' from .util import log_to_stderr return log_to_stderr(level) def allow_connection_pickling(self): '''Install support for sending connections and sockets between processes ''' # This is undocumented. In previous versions of multiprocessing # its only effect was to make socket objects inheritable on Windows. from . import connection # noqa def set_executable(self, executable): '''Sets the path to a python.exe or pythonw.exe binary used to run child processes instead of sys.executable when using the 'spawn' start method. Useful for people embedding Python. ''' from .spawn import set_executable set_executable(executable) def set_forkserver_preload(self, module_names): '''Set list of module names to try to load in forkserver process. This is really just a hint. 
''' from .forkserver import set_forkserver_preload set_forkserver_preload(module_names) def get_context(self, method=None): if method is None: return self try: ctx = _concrete_contexts[method] except KeyError: raise ValueError('cannot find context for %r' % method) ctx._check_available() return ctx def get_start_method(self, allow_none=False): return self._name def set_start_method(self, method=None): raise ValueError('cannot set start method of concrete context') def forking_is_enabled(self): # XXX for compatibility with billiard <3.4 return (self.get_start_method() or 'fork') == 'fork' def forking_enable(self, value): # XXX for compatibility with billiard <3.4 if not value: from ._ext import supports_exec if supports_exec: self.set_start_method('spawn', force=True) else: warnings.warn(RuntimeWarning(W_NO_EXECV)) def _check_available(self): pass # # Type of default context -- underlying context can be set at most once # class Process(process.BaseProcess): _start_method = None @staticmethod def _Popen(process_obj): return _default_context.get_context().Process._Popen(process_obj) class DefaultContext(BaseContext): Process = Process def __init__(self, context): self._default_context = context self._actual_context = None def get_context(self, method=None): if method is None: if self._actual_context is None: self._actual_context = self._default_context return self._actual_context else: return super(DefaultContext, self).get_context(method) def set_start_method(self, method, force=False): if self._actual_context is not None and not force: raise RuntimeError('context has already been set') if method is None and force: self._actual_context = None return self._actual_context = self.get_context(method) def get_start_method(self, allow_none=False): if self._actual_context is None: if allow_none: return None self._actual_context = self._default_context return self._actual_context._name def get_all_start_methods(self): if sys.platform == 'win32': return ['spawn'] else: from . import reduction if reduction.HAVE_SEND_HANDLE: return ['fork', 'spawn', 'forkserver'] else: return ['fork', 'spawn'] DefaultContext.__all__ = list(x for x in dir(DefaultContext) if x[0] != '_') # # Context types for fixed start method # if sys.platform != 'win32': class ForkProcess(process.BaseProcess): _start_method = 'fork' @staticmethod def _Popen(process_obj): from .popen_fork import Popen return Popen(process_obj) class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_posix import Popen return Popen(process_obj) class ForkServerProcess(process.BaseProcess): _start_method = 'forkserver' @staticmethod def _Popen(process_obj): from .popen_forkserver import Popen return Popen(process_obj) class ForkContext(BaseContext): _name = 'fork' Process = ForkProcess class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess class ForkServerContext(BaseContext): _name = 'forkserver' Process = ForkServerProcess def _check_available(self): from . 
import reduction if not reduction.HAVE_SEND_HANDLE: raise ValueError('forkserver start method not available') _concrete_contexts = { 'fork': ForkContext(), 'spawn': SpawnContext(), 'forkserver': ForkServerContext(), } _default_context = DefaultContext(_concrete_contexts['fork']) else: class SpawnProcess(process.BaseProcess): _start_method = 'spawn' @staticmethod def _Popen(process_obj): from .popen_spawn_win32 import Popen return Popen(process_obj) class SpawnContext(BaseContext): _name = 'spawn' Process = SpawnProcess _concrete_contexts = { 'spawn': SpawnContext(), } _default_context = DefaultContext(_concrete_contexts['spawn']) # # Force the start method # def _force_start_method(method): _default_context._actual_context = _concrete_contexts[method] # # Check that the current thread is spawning a child process # _tls = threading.local() def get_spawning_popen(): return getattr(_tls, 'spawning_popen', None) def set_spawning_popen(popen): _tls.spawning_popen = popen def assert_spawning(obj): if get_spawning_popen() is None: raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(obj).__name__ ) billiard-3.5.0.3/billiard/reduction.py0000644000175000017500000002232513132743245017530 0ustar omeromer00000000000000# # Module which deals with pickling of objects. # # multiprocessing/reduction.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import functools import io import os import pickle import socket import sys from . import context __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] PY3 = sys.version_info[0] == 3 HAVE_SEND_HANDLE = (sys.platform == 'win32' or (hasattr(socket, 'CMSG_LEN') and hasattr(socket, 'SCM_RIGHTS') and hasattr(socket.socket, 'sendmsg'))) # # Pickler subclass # if PY3: import copyreg class ForkingPickler(pickle.Pickler): '''Pickler subclass used by multiprocessing.''' _extra_reducers = {} _copyreg_dispatch_table = copyreg.dispatch_table def __init__(self, *args): super(ForkingPickler, self).__init__(*args) self.dispatch_table = self._copyreg_dispatch_table.copy() self.dispatch_table.update(self._extra_reducers) @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' cls._extra_reducers[type] = reduce @classmethod def dumps(cls, obj, protocol=None): buf = io.BytesIO() cls(buf, protocol).dump(obj) return buf.getbuffer() @classmethod def loadbuf(cls, buf, protocol=None): return cls.loads(buf.getbuffer()) loads = pickle.loads else: class ForkingPickler(pickle.Pickler): # noqa '''Pickler subclass used by multiprocessing.''' dispatch = pickle.Pickler.dispatch.copy() @classmethod def register(cls, type, reduce): '''Register a reduce function for a type.''' def dispatcher(self, obj): rv = reduce(obj) self.save_reduce(obj=obj, *rv) cls.dispatch[type] = dispatcher @classmethod def dumps(cls, obj, protocol=None): buf = io.BytesIO() cls(buf, protocol).dump(obj) return buf.getvalue() @classmethod def loadbuf(cls, buf, protocol=None): return cls.loads(buf.getvalue()) @classmethod def loads(cls, buf, loads=pickle.loads): if isinstance(buf, io.BytesIO): buf = buf.getvalue() return loads(buf) register = ForkingPickler.register def dump(obj, file, protocol=None): '''Replacement for pickle.dump() using ForkingPickler.''' ForkingPickler(file, protocol).dump(obj) # # Platform specific definitions # if sys.platform == 'win32': # Windows __all__ += ['DupHandle', 'duplicate', 'steal_handle'] from .compat 
import _winapi def duplicate(handle, target_process=None, inheritable=False): '''Duplicate a handle. (target_process is a handle not a pid!)''' if target_process is None: target_process = _winapi.GetCurrentProcess() return _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, target_process, 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) def steal_handle(source_pid, handle): '''Steal a handle from process identified by source_pid.''' source_process_handle = _winapi.OpenProcess( _winapi.PROCESS_DUP_HANDLE, False, source_pid) try: return _winapi.DuplicateHandle( source_process_handle, handle, _winapi.GetCurrentProcess(), 0, False, _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(source_process_handle) def send_handle(conn, handle, destination_pid): '''Send a handle over a local connection.''' dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) conn.send(dh) def recv_handle(conn): '''Receive a handle over a local connection.''' return conn.recv().detach() class DupHandle(object): '''Picklable wrapper for a handle.''' def __init__(self, handle, access, pid=None): if pid is None: # We just duplicate the handle in the current process and # let the receiving process steal the handle. pid = os.getpid() proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) try: self._handle = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, proc, access, False, 0) finally: _winapi.CloseHandle(proc) self._access = access self._pid = pid def detach(self): '''Get the handle. This should only be called once.''' # retrieve handle from process which currently owns it if self._pid == os.getpid(): # The handle has already been duplicated for this process. return self._handle # We must steal the handle from the process whose pid is self._pid. 
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, self._pid) try: return _winapi.DuplicateHandle( proc, self._handle, _winapi.GetCurrentProcess(), self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) finally: _winapi.CloseHandle(proc) else: # Unix __all__ += ['DupFd', 'sendfds', 'recvfds'] import array # On macOS we should acknowledge receipt of fds -- see Issue14669 ACKNOWLEDGE = sys.platform == 'darwin' def sendfds(sock, fds): '''Send an array of fds over an AF_UNIX socket.''' fds = array.array('i', fds) msg = bytes([len(fds) % 256]) sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) if ACKNOWLEDGE and sock.recv(1) != b'A': raise RuntimeError('did not receive acknowledgement of fd') def recvfds(sock, size): '''Receive an array of fds over an AF_UNIX socket.''' a = array.array('i') bytes_size = a.itemsize * size msg, ancdata, flags, addr = sock.recvmsg( 1, socket.CMSG_LEN(bytes_size), ) if not msg and not ancdata: raise EOFError try: if ACKNOWLEDGE: sock.send(b'A') if len(ancdata) != 1: raise RuntimeError( 'received %d items of ancdata' % len(ancdata), ) cmsg_level, cmsg_type, cmsg_data = ancdata[0] if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS): if len(cmsg_data) % a.itemsize != 0: raise ValueError a.frombytes(cmsg_data) assert len(a) % 256 == msg[0] return list(a) except (ValueError, IndexError): pass raise RuntimeError('Invalid data received') def send_handle(conn, handle, destination_pid): # noqa '''Send a handle over a local connection.''' fd = conn.fileno() with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: sendfds(s, [handle]) def recv_handle(conn): # noqa '''Receive a handle over a local connection.''' fd = conn.fileno() with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: return recvfds(s, 1)[0] def DupFd(fd): '''Return a wrapper for an fd.''' popen_obj = context.get_spawning_popen() if popen_obj is not None: return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) elif HAVE_SEND_HANDLE: from . 
import resource_sharer return resource_sharer.DupFd(fd) else: raise ValueError('SCM_RIGHTS appears not to be available') # # Try making some callable types picklable # def _reduce_method(m): if m.__self__ is None: return getattr, (m.__class__, m.__func__.__name__) else: return getattr, (m.__self__, m.__func__.__name__) class _C: def f(self): pass register(type(_C().f), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) register(type(list.append), _reduce_method_descriptor) register(type(int.__add__), _reduce_method_descriptor) def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return functools.partial(func, *args, **keywords) register(functools.partial, _reduce_partial) # # Make sockets picklable # if sys.platform == 'win32': def _reduce_socket(s): from .resource_sharer import DupSocket return _rebuild_socket, (DupSocket(s),) def _rebuild_socket(ds): return ds.detach() register(socket.socket, _reduce_socket) else: def _reduce_socket(s): # noqa df = DupFd(s.fileno()) return _rebuild_socket, (df, s.family, s.type, s.proto) def _rebuild_socket(df, family, type, proto): # noqa fd = df.detach() return socket.socket(family, type, proto, fileno=fd) register(socket.socket, _reduce_socket) billiard-3.5.0.3/billiard/pool.py0000644000175000017500000020202713132743245016504 0ustar omeromer00000000000000# -*- coding: utf-8 -*- # # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import # # Imports # import errno import itertools import os import platform import signal import sys import threading import time import warnings from collections import deque from functools import partial from . import cpu_count, get_context from . import util from .common import ( TERM_SIGNAL, human_status, pickle_loads, reset_signals, restart_state, ) from .compat import get_errno, mem_rss, send_offset from .einfo import ExceptionInfo from .dummy import DummyProcess from .exceptions import ( CoroStop, RestartFreqExceeded, SoftTimeLimitExceeded, Terminated, TimeLimitExceeded, TimeoutError, WorkerLostError, ) from .five import Empty, Queue, range, values, reraise, monotonic from .util import Finalize, debug MAXMEM_USED_FMT = """\ child process exiting after exceeding memory limit ({0}KiB / {1}KiB) """ PY3 = sys.version_info[0] == 3 if platform.system() == 'Windows': # pragma: no cover # On Windows os.kill calls TerminateProcess which cannot be # handled by # any process, so this is needed to terminate the task # *and its children* (if any). from ._win import kill_processtree as _kill # noqa SIGKILL = TERM_SIGNAL else: from os import kill as _kill # noqa SIGKILL = signal.SIGKILL try: TIMEOUT_MAX = threading.TIMEOUT_MAX except AttributeError: # pragma: no cover TIMEOUT_MAX = 1e10 # noqa if sys.version_info >= (3, 3): _Semaphore = threading.Semaphore else: # Semaphore is a factory function pointing to _Semaphore _Semaphore = threading._Semaphore # noqa # # Constants representing the state of a pool # RUN = 0 CLOSE = 1 TERMINATE = 2 # # Constants representing the state of a job # ACK = 0 READY = 1 TASK = 2 NACK = 3 DEATH = 4 # # Exit code constants # EX_OK = 0 EX_FAILURE = 1 EX_RECYCLE = 0x9B # Signal used for soft time limits. 
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None) # # Miscellaneous # LOST_WORKER_TIMEOUT = 10.0 EX_OK = getattr(os, "EX_OK", 0) job_counter = itertools.count() Lock = threading.Lock def _get_send_offset(connection): try: native = connection.send_offset except AttributeError: native = None if native is None: return partial(send_offset, connection.fileno()) return native def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) def error(msg, *args, **kwargs): util.get_logger().error(msg, *args, **kwargs) def stop_if_not_current(thread, timeout=None): if thread is not threading.current_thread(): thread.stop(timeout) class LaxBoundedSemaphore(_Semaphore): """Semaphore that checks that # release is <= # acquires, but ignores if # releases >= value.""" def shrink(self): self._initial_value -= 1 self.acquire() if PY3: def __init__(self, value=1, verbose=None): _Semaphore.__init__(self, value) self._initial_value = value def grow(self): with self._cond: self._initial_value += 1 self._value += 1 self._cond.notify() def release(self): cond = self._cond with cond: if self._value < self._initial_value: self._value += 1 cond.notify_all() def clear(self): while self._value < self._initial_value: _Semaphore.release(self) else: def __init__(self, value=1, verbose=None): _Semaphore.__init__(self, value, verbose) self._initial_value = value def grow(self): cond = self._Semaphore__cond with cond: self._initial_value += 1 self._Semaphore__value += 1 cond.notify() def release(self): # noqa cond = self._Semaphore__cond with cond: if self._Semaphore__value < self._initial_value: self._Semaphore__value += 1 cond.notifyAll() def clear(self): # noqa while self._Semaphore__value < self._initial_value: _Semaphore.release(self) # # Exceptions # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, str(self)) def __str__(self): return "Error sending result: '%r'. Reason: '%r'." 
% ( self.value, self.exc) class WorkersJoined(Exception): """All workers have terminated.""" def soft_timeout_sighandler(signum, frame): raise SoftTimeLimitExceeded() # # Code run by worker processes # class Worker(object): def __init__(self, inq, outq, synq=None, initializer=None, initargs=(), maxtasks=None, sentinel=None, on_exit=None, sigprotection=True, wrap_exception=True, max_memory_per_child=None): assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) self.initializer = initializer self.initargs = initargs self.maxtasks = maxtasks self.max_memory_per_child = max_memory_per_child self._shutdown = sentinel self.on_exit = on_exit self.sigprotection = sigprotection self.inq, self.outq, self.synq = inq, outq, synq self.wrap_exception = wrap_exception # XXX cannot disable yet self.contribute_to_object(self) def contribute_to_object(self, obj): obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd if self.synq: obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd obj.send_syn_offset = _get_send_offset(self.synq._writer) else: obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None obj._quick_put = self.inq._writer.send obj._quick_get = self.outq._reader.recv obj.send_job_offset = _get_send_offset(self.inq._writer) return obj def __reduce__(self): return self.__class__, ( self.inq, self.outq, self.synq, self.initializer, self.initargs, self.maxtasks, self._shutdown, self.on_exit, self.sigprotection, self.wrap_exception, ) def __call__(self): _exit = sys.exit _exitcode = [None] def exit(status=None): _exitcode[0] = status return _exit() sys.exit = exit pid = os.getpid() self._make_child_methods() self.after_fork() self.on_loop_start(pid=pid) # callback on loop start try: sys.exit(self.workloop(pid=pid)) except Exception as exc: error('Pool process %r error: %r', self, exc, exc_info=1) self._do_exit(pid, _exitcode[0], exc) finally: self._do_exit(pid, _exitcode[0], None) def _do_exit(self, pid, exitcode, exc=None): if exitcode is None: exitcode = EX_FAILURE if exc else EX_OK if self.on_exit is not None: self.on_exit(pid, exitcode) if sys.platform != 'win32': try: self.outq.put((DEATH, (pid, exitcode))) time.sleep(1) finally: os._exit(exitcode) else: os._exit(exitcode) def on_loop_start(self, pid): pass def prepare_result(self, result): return result def workloop(self, debug=debug, now=monotonic, pid=None): pid = pid or os.getpid() put = self.outq.put inqW_fd = self.inqW_fd synqW_fd = self.synqW_fd maxtasks = self.maxtasks max_memory_per_child = self.max_memory_per_child or 0 prepare_result = self.prepare_result wait_for_job = self.wait_for_job _wait_for_syn = self.wait_for_syn def wait_for_syn(jid): i = 0 while 1: if i > 60: error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!', jid, self.synq._reader.fileno(), exc_info=1) req = _wait_for_syn() if req: type_, args = req if type_ == NACK: return False assert type_ == ACK return True i += 1 completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): req = wait_for_job() if req: type_, args_ = req assert type_ == TASK job, i, fun, args, kwargs = args_ put((ACK, (job, i, now(), pid, synqW_fd))) if _wait_for_syn: confirm = wait_for_syn(job) if not confirm: continue # received NACK try: result = (True, prepare_result(fun(*args, **kwargs))) except Exception: result = (False, ExceptionInfo()) try: put((READY, (job, i, result, 
inqW_fd))) except Exception as exc: _, _, tb = sys.exc_info() try: wrapped = MaybeEncodingError(exc, result[1]) einfo = ExceptionInfo(( MaybeEncodingError, wrapped, tb, )) put((READY, (job, i, (False, einfo), inqW_fd))) finally: del(tb) completed += 1 if max_memory_per_child > 0: used_kb = mem_rss() if used_kb <= 0: error('worker unable to determine memory usage') if used_kb > 0 and used_kb > max_memory_per_child: error(MAXMEM_USED_FMT.format( used_kb, max_memory_per_child)) return EX_RECYCLE debug('worker exiting after %d tasks', completed) if maxtasks: return EX_RECYCLE if completed == maxtasks else EX_FAILURE return EX_OK def after_fork(self): if hasattr(self.inq, '_writer'): self.inq._writer.close() if hasattr(self.outq, '_reader'): self.outq._reader.close() if self.initializer is not None: self.initializer(*self.initargs) # Make sure all exiting signals call finally: blocks. # This is important for the semaphore to be released. reset_signals(full=self.sigprotection) # install signal handler for soft timeouts. if SIG_SOFT_TIMEOUT is not None: signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler) try: signal.signal(signal.SIGINT, signal.SIG_IGN) except AttributeError: pass def _make_recv_method(self, conn): get = conn.get if hasattr(conn, '_reader'): _poll = conn._reader.poll if hasattr(conn, 'get_payload') and conn.get_payload: get_payload = conn.get_payload def _recv(timeout, loads=pickle_loads): return True, loads(get_payload()) else: def _recv(timeout): # noqa if _poll(timeout): return True, get() return False, None else: def _recv(timeout): # noqa try: return True, get(timeout=timeout) except Queue.Empty: return False, None return _recv def _make_child_methods(self, loads=pickle_loads): self.wait_for_job = self._make_protected_receive(self.inq) self.wait_for_syn = (self._make_protected_receive(self.synq) if self.synq else None) def _make_protected_receive(self, conn): _receive = self._make_recv_method(conn) should_shutdown = self._shutdown.is_set if self._shutdown else None def receive(debug=debug): if should_shutdown and should_shutdown(): debug('worker got sentinel -- exiting') raise SystemExit(EX_OK) try: ready, req = _receive(1.0) if not ready: return None except (EOFError, IOError) as exc: if get_errno(exc) == errno.EINTR: return None # interrupted, maybe by gdb debug('worker got %s -- exiting', type(exc).__name__) raise SystemExit(EX_FAILURE) if req is None: debug('worker got sentinel -- exiting') raise SystemExit(EX_FAILURE) return req return receive # # Class representing a process pool # class PoolThread(DummyProcess): def __init__(self, *args, **kwargs): DummyProcess.__init__(self) self._state = RUN self._was_started = False self.daemon = True def run(self): try: return self.body() except RestartFreqExceeded as exc: error("Thread %r crashed: %r", type(self).__name__, exc, exc_info=1) _kill(os.getpid(), TERM_SIGNAL) sys.exit() except Exception as exc: error("Thread %r crashed: %r", type(self).__name__, exc, exc_info=1) os._exit(1) def start(self, *args, **kwargs): self._was_started = True super(PoolThread, self).start(*args, **kwargs) def on_stop_not_started(self): pass def stop(self, timeout=None): if self._was_started: self.join(timeout) return self.on_stop_not_started() def terminate(self): self._state = TERMINATE def close(self): self._state = CLOSE class Supervisor(PoolThread): def __init__(self, pool): self.pool = pool super(Supervisor, self).__init__() def body(self): debug('worker handler starting') time.sleep(0.8) pool = self.pool try: # do a burst at startup 
to verify that we can start # our pool processes, and in that time we lower # the max restart frequency. prev_state = pool.restart_state pool.restart_state = restart_state(10 * pool._processes, 1) for _ in range(10): if self._state == RUN and pool._state == RUN: pool._maintain_pool() time.sleep(0.1) # Keep maintaing workers until the cache gets drained, unless # the pool is termianted pool.restart_state = prev_state while self._state == RUN and pool._state == RUN: pool._maintain_pool() time.sleep(0.8) except RestartFreqExceeded: pool.close() pool.join() raise debug('worker handler exiting') class TaskHandler(PoolThread): def __init__(self, taskqueue, put, outqueue, pool, cache): self.taskqueue = taskqueue self.put = put self.outqueue = outqueue self.pool = pool self.cache = cache super(TaskHandler, self).__init__() def body(self): cache = self.cache taskqueue = self.taskqueue put = self.put for taskseq, set_length in iter(taskqueue.get, None): task = None i = -1 try: for i, task in enumerate(taskseq): if self._state: debug('task handler found thread._state != RUN') break try: put(task) except IOError: debug('could not put task on queue') break except Exception: job, ind = task[:2] try: cache[job]._set(ind, (False, ExceptionInfo())) except KeyError: pass else: if set_length: debug('doing set_length()') set_length(i + 1) continue break except Exception: job, ind = task[:2] if task else (0, 0) if job in cache: cache[job]._set(ind + 1, (False, ExceptionInfo())) if set_length: util.debug('doing set_length()') set_length(i + 1) else: debug('task handler got sentinel') self.tell_others() def tell_others(self): outqueue = self.outqueue put = self.put pool = self.pool try: # tell result handler to finish when cache is empty debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work debug('task handler sending sentinel to workers') for p in pool: put(None) except IOError: debug('task handler got IOError when sending sentinels') debug('task handler exiting') def on_stop_not_started(self): self.tell_others() class TimeoutHandler(PoolThread): def __init__(self, processes, cache, t_soft, t_hard): self.processes = processes self.cache = cache self.t_soft = t_soft self.t_hard = t_hard self._it = None super(TimeoutHandler, self).__init__() def _process_by_pid(self, pid): return next(( (proc, i) for i, proc in enumerate(self.processes) if proc.pid == pid ), (None, None)) def on_soft_timeout(self, job): debug('soft time limit exceeded for %r', job) process, _index = self._process_by_pid(job._worker_pid) if not process: return # Run timeout callback job.handle_timeout(soft=True) try: _kill(job._worker_pid, SIG_SOFT_TIMEOUT) except OSError as exc: if get_errno(exc) != errno.ESRCH: raise def on_hard_timeout(self, job): if job.ready(): return debug('hard time limit exceeded for %r', job) # Remove from cache and set return value to an exception try: raise TimeLimitExceeded(job._timeout) except TimeLimitExceeded: job._set(job._job, (False, ExceptionInfo())) else: # pragma: no cover pass # Remove from _pool process, _index = self._process_by_pid(job._worker_pid) # Run timeout callback job.handle_timeout(soft=False) if process: self._trywaitkill(process) def _trywaitkill(self, worker): debug('timeout: sending TERM to %s', worker._name) try: if os.getpgid(worker.pid) == worker.pid: debug("worker %s is a group leader. 
It is safe to kill (SIGTERM) the whole group", worker.pid) os.killpg(os.getpgid(worker.pid), signal.SIGTERM) else: worker.terminate() except OSError: pass else: if worker._popen.wait(timeout=0.1): return debug('timeout: TERM timed-out, now sending KILL to %s', worker._name) try: if os.getpgid(worker.pid) == worker.pid: debug("worker %s is a group leader. It is safe to kill (SIGKILL) the whole group", worker.pid) os.killpg(os.getpgid(worker.pid), signal.SIGKILL) else: _kill(worker.pid, SIGKILL) except OSError: pass def handle_timeouts(self): cache = self.cache t_hard, t_soft = self.t_hard, self.t_soft dirty = set() on_soft_timeout = self.on_soft_timeout on_hard_timeout = self.on_hard_timeout def _timed_out(start, timeout): if not start or not timeout: return False if monotonic() >= start + timeout: return True # Inner-loop while self._state == RUN: # Remove dirty items not in cache anymore if dirty: dirty = set(k for k in dirty if k in cache) for i, job in list(cache.items()): ack_time = job._time_accepted soft_timeout = job._soft_timeout if soft_timeout is None: soft_timeout = t_soft hard_timeout = job._timeout if hard_timeout is None: hard_timeout = t_hard if _timed_out(ack_time, hard_timeout): on_hard_timeout(job) elif i not in dirty and _timed_out(ack_time, soft_timeout): on_soft_timeout(job) dirty.add(i) yield def body(self): while self._state == RUN: try: for _ in self.handle_timeouts(): time.sleep(1.0) # don't spin except CoroStop: break debug('timeout handler exiting') def handle_event(self, *args): if self._it is None: self._it = self.handle_timeouts() try: next(self._it) except StopIteration: self._it = None class ResultHandler(PoolThread): def __init__(self, outqueue, get, cache, poll, join_exited_workers, putlock, restart_state, check_timeouts, on_job_ready): self.outqueue = outqueue self.get = get self.cache = cache self.poll = poll self.join_exited_workers = join_exited_workers self.putlock = putlock self.restart_state = restart_state self._it = None self._shutdown_complete = False self.check_timeouts = check_timeouts self.on_job_ready = on_job_ready self._make_methods() super(ResultHandler, self).__init__() def on_stop_not_started(self): # used when pool started without result handler thread. self.finish_at_shutdown(handle_timeouts=True) def _make_methods(self): cache = self.cache putlock = self.putlock restart_state = self.restart_state on_job_ready = self.on_job_ready def on_ack(job, i, time_accepted, pid, synqW_fd): restart_state.R = 0 try: cache[job]._ack(i, time_accepted, pid, synqW_fd) except (KeyError, AttributeError): # Object gone or doesn't support _ack (e.g. IMAPIterator). 
pass def on_ready(job, i, obj, inqW_fd): if on_job_ready is not None: on_job_ready(job, i, obj, inqW_fd) try: item = cache[job] except KeyError: return if not item.ready(): if putlock is not None: putlock.release() try: item._set(i, obj) except KeyError: pass def on_death(pid, exitcode): try: os.kill(pid, TERM_SIGNAL) except OSError as exc: if get_errno(exc) != errno.ESRCH: raise state_handlers = self.state_handlers = { ACK: on_ack, READY: on_ready, DEATH: on_death } def on_state_change(task): state, args = task try: state_handlers[state](*args) except KeyError: debug("Unknown job state: %s (args=%s)", state, args) self.on_state_change = on_state_change def _process_result(self, timeout=1.0): poll = self.poll on_state_change = self.on_state_change while 1: try: ready, task = poll(timeout) except (IOError, EOFError) as exc: debug('result handler got %r -- exiting', exc) raise CoroStop() if self._state: assert self._state == TERMINATE debug('result handler found thread._state=TERMINATE') raise CoroStop() if ready: if task is None: debug('result handler got sentinel') raise CoroStop() on_state_change(task) if timeout != 0: # blocking break else: break yield def handle_event(self, fileno=None, events=None): if self._state == RUN: if self._it is None: self._it = self._process_result(0) # non-blocking try: next(self._it) except (StopIteration, CoroStop): self._it = None def body(self): debug('result handler starting') try: while self._state == RUN: try: for _ in self._process_result(1.0): # blocking pass except CoroStop: break finally: self.finish_at_shutdown() def finish_at_shutdown(self, handle_timeouts=False): self._shutdown_complete = True get = self.get outqueue = self.outqueue cache = self.cache poll = self.poll join_exited_workers = self.join_exited_workers check_timeouts = self.check_timeouts on_state_change = self.on_state_change time_terminate = None while cache and self._state != TERMINATE: if check_timeouts is not None: check_timeouts() try: ready, task = poll(1.0) except (IOError, EOFError) as exc: debug('result handler got %r -- exiting', exc) return if ready: if task is None: debug('result handler ignoring extra sentinel') continue on_state_change(task) try: join_exited_workers(shutdown=True) except WorkersJoined: now = monotonic() if not time_terminate: time_terminate = now else: if now - time_terminate > 5.0: debug('result handler exiting: timed out') break debug('result handler: all workers terminated, ' 'timeout in %ss', abs(min(now - time_terminate - 5.0, 0))) if hasattr(outqueue, '_reader'): debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. try: for i in range(10): if not outqueue._reader.poll(): break get() except (IOError, EOFError): pass debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), self._state) class Pool(object): ''' Class which supports an async version of applying functions to arguments. 
''' _wrap_exception = True Worker = Worker Supervisor = Supervisor TaskHandler = TaskHandler TimeoutHandler = TimeoutHandler ResultHandler = ResultHandler SoftTimeLimitExceeded = SoftTimeLimitExceeded def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, timeout=None, soft_timeout=None, lost_worker_timeout=None, max_restarts=None, max_restart_freq=1, on_process_up=None, on_process_down=None, on_timeout_set=None, on_timeout_cancel=None, threads=True, semaphore=None, putlocks=False, allow_restart=False, synack=False, on_process_exit=None, context=None, max_memory_per_child=None, enable_timeouts=False, **kwargs): self._ctx = context or get_context() self.synack = synack self._setup_queues() self._taskqueue = Queue() self._cache = {} self._state = RUN self.timeout = timeout self.soft_timeout = soft_timeout self._maxtasksperchild = maxtasksperchild self._max_memory_per_child = max_memory_per_child self._initializer = initializer self._initargs = initargs self._on_process_exit = on_process_exit self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT self.on_process_up = on_process_up self.on_process_down = on_process_down self.on_timeout_set = on_timeout_set self.on_timeout_cancel = on_timeout_cancel self.threads = threads self.readers = {} self.allow_restart = allow_restart self.enable_timeouts = bool( enable_timeouts or self.timeout is not None or self.soft_timeout is not None ) if soft_timeout and SIG_SOFT_TIMEOUT is None: warnings.warn(UserWarning( "Soft timeouts are not supported: " "on this platform: It does not have the SIGUSR1 signal.", )) soft_timeout = None self._processes = self.cpu_count() if processes is None else processes self.max_restarts = max_restarts or round(self._processes * 100) self.restart_state = restart_state(max_restarts, max_restart_freq or 1) if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') if on_process_exit is not None and not callable(on_process_exit): raise TypeError('on_process_exit must be callable') self._Process = self._ctx.Process self._pool = [] self._poolctrl = {} self.putlocks = putlocks self._putlock = semaphore or LaxBoundedSemaphore(self._processes) for i in range(self._processes): self._create_worker_process(i) self._worker_handler = self.Supervisor(self) if threads: self._worker_handler.start() self._task_handler = self.TaskHandler(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) if threads: self._task_handler.start() self.check_timeouts = None # Thread killing timedout jobs. if self.enable_timeouts: self._timeout_handler = self.TimeoutHandler( self._pool, self._cache, self.soft_timeout, self.timeout, ) self._timeout_handler_mutex = Lock() self._timeout_handler_started = False self._start_timeout_handler() # If running without threads, we need to check for timeouts # while waiting for unfinished work at shutdown. if not threads: self.check_timeouts = self._timeout_handler.handle_event else: self._timeout_handler = None self._timeout_handler_started = False self._timeout_handler_mutex = None # Thread processing results in the outqueue. 
self._result_handler = self.create_result_handler() self.handle_result_event = self._result_handler.handle_event if threads: self._result_handler.start() self._terminate = Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, self._result_handler, self._cache, self._timeout_handler, self._help_stuff_finish_args()), exitpriority=15, ) def Process(self, *args, **kwds): return self._Process(*args, **kwds) def WorkerProcess(self, worker): return worker.contribute_to_object(self.Process(target=worker)) def create_result_handler(self, **extra_kwargs): return self.ResultHandler( self._outqueue, self._quick_get, self._cache, self._poll_result, self._join_exited_workers, self._putlock, self.restart_state, self.check_timeouts, self.on_job_ready, **extra_kwargs ) def on_job_ready(self, job, i, obj, inqW_fd): pass def _help_stuff_finish_args(self): return self._inqueue, self._task_handler, self._pool def cpu_count(self): try: return cpu_count() except NotImplementedError: return 1 def handle_result_event(self, *args): return self._result_handler.handle_event(*args) def _process_register_queues(self, worker, queues): pass def _process_by_pid(self, pid): return next(( (proc, i) for i, proc in enumerate(self._pool) if proc.pid == pid ), (None, None)) def get_process_queues(self): return self._inqueue, self._outqueue, None def _create_worker_process(self, i): sentinel = self._ctx.Event() if self.allow_restart else None inq, outq, synq = self.get_process_queues() w = self.WorkerProcess(self.Worker( inq, outq, synq, self._initializer, self._initargs, self._maxtasksperchild, sentinel, self._on_process_exit, # Need to handle all signals if using the ipc semaphore, # to make sure the semaphore is released. sigprotection=self.threads, wrap_exception=self._wrap_exception, max_memory_per_child=self._max_memory_per_child, )) self._pool.append(w) self._process_register_queues(w, (inq, outq, synq)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.index = i w.start() self._poolctrl[w.pid] = sentinel if self.on_process_up: self.on_process_up(w) return w def process_flush_queues(self, worker): pass def _join_exited_workers(self, shutdown=False): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ now = None # The worker may have published a result before being terminated, # but we have no way to accurately tell if it did. So we wait for # _lost_worker_timeout seconds before we mark the job with # WorkerLostError. 
for job in [job for job in list(self._cache.values()) if not job.ready() and job._worker_lost]: now = now or monotonic() lost_time, lost_ret = job._worker_lost if now - lost_time > job._lost_worker_timeout: self.mark_as_worker_lost(job, lost_ret) if shutdown and not len(self._pool): raise WorkersJoined() cleaned, exitcodes = {}, {} for i in reversed(range(len(self._pool))): worker = self._pool[i] exitcode = worker.exitcode popen = worker._popen if popen is None or exitcode is not None: # worker exited debug('Supervisor: cleaning up worker %d', i) if popen is not None: worker.join() debug('Supervisor: worked %d joined', i) cleaned[worker.pid] = worker exitcodes[worker.pid] = exitcode if exitcode not in (EX_OK, EX_RECYCLE) and \ not getattr(worker, '_controlled_termination', False): error( 'Process %r pid:%r exited with %r', worker.name, worker.pid, human_status(exitcode), exc_info=0, ) self.process_flush_queues(worker) del self._pool[i] del self._poolctrl[worker.pid] if cleaned: all_pids = [w.pid for w in self._pool] for job in list(self._cache.values()): acked_by_gone = next( (pid for pid in job.worker_pids() if pid in cleaned or pid not in all_pids), None ) # already accepted by process if acked_by_gone: self.on_job_process_down(job, acked_by_gone) if not job.ready(): exitcode = exitcodes.get(acked_by_gone) or 0 proc = cleaned.get(acked_by_gone) if proc and getattr(proc, '_job_terminated', False): job._set_terminated(exitcode) else: self.on_job_process_lost( job, acked_by_gone, exitcode, ) else: # started writing to write_to = job._write_to # was scheduled to write to sched_for = job._scheduled_for if write_to and not write_to._is_alive(): self.on_job_process_down(job, write_to.pid) elif sched_for and not sched_for._is_alive(): self.on_job_process_down(job, sched_for.pid) for worker in values(cleaned): if self.on_process_down: if not shutdown: self._process_cleanup_queues(worker) self.on_process_down(worker) return list(exitcodes.values()) return [] def on_partial_read(self, job, worker): pass def _process_cleanup_queues(self, worker): pass def on_job_process_down(self, job, pid_gone): pass def on_job_process_lost(self, job, pid, exitcode): job._worker_lost = (monotonic(), exitcode) def mark_as_worker_lost(self, job, exitcode): try: raise WorkerLostError( 'Worker exited prematurely: {0}.'.format( human_status(exitcode)), ) except WorkerLostError: job._set(None, (False, ExceptionInfo())) else: # pragma: no cover pass def __enter__(self): return self def __exit__(self, *exc_info): return self.terminate() def on_grow(self, n): pass def on_shrink(self, n): pass def shrink(self, n=1): for i, worker in enumerate(self._iterinactive()): self._processes -= 1 if self._putlock: self._putlock.shrink() worker.terminate_controlled() self.on_shrink(1) if i >= n - 1: break else: raise ValueError("Can't shrink pool. All processes busy!") def grow(self, n=1): for i in range(n): self._processes += 1 if self._putlock: self._putlock.grow() self.on_grow(n) def _iterinactive(self): for worker in self._pool: if not self._worker_active(worker): yield worker def _worker_active(self, worker): for job in values(self._cache): if worker.pid in job.worker_pids(): return True return False def _repopulate_pool(self, exitcodes): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. 
""" for i in range(self._processes - len(self._pool)): if self._state != RUN: return try: if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE): self.restart_state.step() except IndexError: self.restart_state.step() self._create_worker_process(self._avail_index()) debug('added worker') def _avail_index(self): assert len(self._pool) < self._processes indices = set(p.index for p in self._pool) return next(i for i in range(self._processes) if i not in indices) def did_start_ok(self): return not self._join_exited_workers() def _maintain_pool(self): """"Clean up any exited workers and start replacements for them. """ joined = self._join_exited_workers() self._repopulate_pool(joined) for i in range(len(joined)): if self._putlock is not None: self._putlock.release() def maintain_pool(self): if self._worker_handler._state == RUN and self._state == RUN: try: self._maintain_pool() except RestartFreqExceeded: self.close() self.join() raise except OSError as exc: if get_errno(exc) == errno.ENOMEM: reraise(MemoryError, MemoryError(str(exc)), sys.exc_info()[2]) raise def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def _poll_result(timeout): if self._outqueue._reader.poll(timeout): return True, self._quick_get() return False, None self._poll_result = _poll_result def _start_timeout_handler(self): # ensure more than one thread does not start the timeout handler # thread at once. if self.threads and self._timeout_handler is not None: with self._timeout_handler_mutex: if not self._timeout_handler_started: self._timeout_handler_started = True self._timeout_handler.start() def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwargs)`. ''' if self._state == RUN: return self.apply_async(func, args, kwds).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' if self._state == RUN: return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' if self._state == RUN: return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' if self._state == RUN: return self.map_async(func, iterable, chunksize).get() def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. 
''' if self._state != RUN: return lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout if chunksize == 1: result = IMapIterator(self._cache, lost_worker_timeout=lost_worker_timeout) self._taskqueue.put(( ((TASK, (result._job, i, func, (x,), {})) for i, x in enumerate(iterable)), result._set_length, )) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self._cache, lost_worker_timeout=lost_worker_timeout) self._taskqueue.put(( ((TASK, (result._job, i, mapstar, (x,), {})) for i, x in enumerate(task_batches)), result._set_length, )) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1, lost_worker_timeout=None): ''' Like `imap()` method but ordering of results is arbitrary. ''' if self._state != RUN: return lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout if chunksize == 1: result = IMapUnorderedIterator( self._cache, lost_worker_timeout=lost_worker_timeout, ) self._taskqueue.put(( ((TASK, (result._job, i, func, (x,), {})) for i, x in enumerate(iterable)), result._set_length, )) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator( self._cache, lost_worker_timeout=lost_worker_timeout, ) self._taskqueue.put(( ((TASK, (result._job, i, mapstar, (x,), {})) for i, x in enumerate(task_batches)), result._set_length, )) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None, accept_callback=None, timeout_callback=None, waitforslot=None, soft_timeout=None, timeout=None, lost_worker_timeout=None, callbacks_propagate=(), correlation_id=None): ''' Asynchronous equivalent of `apply()` method. Callback is called when the functions return value is ready. The accept callback is called when the job is accepted to be executed. Simplified the flow is like this: >>> def apply_async(func, args, kwds, callback, accept_callback): ... if accept_callback: ... accept_callback() ... retval = func(*args, **kwds) ... if callback: ... callback(retval) ''' if self._state != RUN: return soft_timeout = soft_timeout or self.soft_timeout timeout = timeout or self.timeout lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout if soft_timeout and SIG_SOFT_TIMEOUT is None: warnings.warn(UserWarning( "Soft timeouts are not supported: " "on this platform: It does not have the SIGUSR1 signal.", )) soft_timeout = None if self._state == RUN: waitforslot = self.putlocks if waitforslot is None else waitforslot if waitforslot and self._putlock is not None: self._putlock.acquire() result = ApplyResult( self._cache, callback, accept_callback, timeout_callback, error_callback, soft_timeout, timeout, lost_worker_timeout, on_timeout_set=self.on_timeout_set, on_timeout_cancel=self.on_timeout_cancel, callbacks_propagate=callbacks_propagate, send_ack=self.send_ack if self.synack else None, correlation_id=correlation_id, ) if timeout or soft_timeout: # start the timeout handler thread when required. 
self._start_timeout_handler() if self.threads: self._taskqueue.put(([(TASK, (result._job, None, func, args, kwds))], None)) else: self._quick_put((TASK, (result._job, None, func, args, kwds))) return result def send_ack(self, response, job, i, fd): pass def terminate_job(self, pid, sig=None): proc, _ = self._process_by_pid(pid) if proc is not None: try: _kill(pid, sig or TERM_SIGNAL) except OSError as exc: if get_errno(exc) != errno.ESRCH: raise else: proc._controlled_termination = True proc._job_terminated = True def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous equivalent of `map()` method. ''' return self._map_async( func, iterable, mapstar, chunksize, callback, error_callback, ) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' if self._state != RUN: return if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {})) for i, x in enumerate(task_batches)), None)) return result @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled', ) def close(self): debug('closing pool') if self._state == RUN: self._state = CLOSE if self._putlock: self._putlock.clear() self._worker_handler.close() self._taskqueue.put(None) stop_if_not_current(self._worker_handler) def terminate(self): debug('terminating pool') self._state = TERMINATE self._worker_handler.terminate() self._terminate() @staticmethod def _stop_task_handler(task_handler): stop_if_not_current(task_handler) def join(self): assert self._state in (CLOSE, TERMINATE) debug('joining worker handler') stop_if_not_current(self._worker_handler) debug('joining task handler') self._stop_task_handler(self._task_handler) debug('joining result handler') stop_if_not_current(self._result_handler) debug('result handler joined') for i, p in enumerate(self._pool): debug('joining worker %s/%s (%r)', i + 1, len(self._pool), p) if p._popen is not None: # process started? 
p.join() debug('pool join complete') def restart(self): for e in values(self._poolctrl): e.set() @staticmethod def _help_stuff_finish(inqueue, task_handler, _pool): # task_handler may be blocked trying to put items on inqueue debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _set_result_sentinel(cls, outqueue, pool): outqueue.put(None) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache, timeout_handler, help_stuff_finish_args): # this is guaranteed to only be called once debug('finalizing pool') worker_handler.terminate() task_handler.terminate() taskqueue.put(None) # sentinel debug('helping task handler/workers to finish') cls._help_stuff_finish(*help_stuff_finish_args) result_handler.terminate() cls._set_result_sentinel(outqueue, pool) if timeout_handler is not None: timeout_handler.terminate() # Terminate workers which haven't already finished if pool and hasattr(pool[0], 'terminate'): debug('terminating workers') for p in pool: if p._is_alive(): p.terminate() debug('joining task handler') cls._stop_task_handler(task_handler) debug('joining result handler') result_handler.stop() if timeout_handler is not None: debug('joining timeout handler') timeout_handler.stop(TIMEOUT_MAX) if pool and hasattr(pool[0], 'terminate'): debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited debug('cleaning up worker %d', p.pid) if p._popen is not None: p.join() debug('pool workers joined') @property def process_sentinels(self): return [w._popen.sentinel for w in self._pool] # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): _worker_lost = None _write_to = None _scheduled_for = None def __init__(self, cache, callback, accept_callback=None, timeout_callback=None, error_callback=None, soft_timeout=None, timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT, on_timeout_set=None, on_timeout_cancel=None, callbacks_propagate=(), send_ack=None, correlation_id=None): self.correlation_id = correlation_id self._mutex = Lock() self._event = threading.Event() self._job = next(job_counter) self._cache = cache self._callback = callback self._accept_callback = accept_callback self._error_callback = error_callback self._timeout_callback = timeout_callback self._timeout = timeout self._soft_timeout = soft_timeout self._lost_worker_timeout = lost_worker_timeout self._on_timeout_set = on_timeout_set self._on_timeout_cancel = on_timeout_cancel self._callbacks_propagate = callbacks_propagate or () self._send_ack = send_ack self._accepted = False self._cancelled = False self._worker_pid = None self._time_accepted = None self._terminated = None cache[self._job] = self def __repr__(self): return '<%s: {id} ack:{ack} ready:{ready}>'.format( self.__class__.__name__, id=self._job, ack=self._accepted, ready=self.ready(), ) def ready(self): return self._event.isSet() def accepted(self): return self._accepted def successful(self): assert self.ready() return self._success def _cancel(self): """Only works if synack is used.""" self._cancelled = True def discard(self): self._cache.pop(self._job, None) def terminate(self, signum): self._terminated = signum def _set_terminated(self, signum=None): try: raise Terminated(-(signum or 0)) except Terminated: self._set(None, (False, ExceptionInfo())) def worker_pids(self): return [self._worker_pid] 
if self._worker_pid else [] def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value.exception def safe_apply_callback(self, fun, *args, **kwargs): if fun: try: fun(*args, **kwargs) except self._callbacks_propagate: raise except Exception as exc: error('Pool callback raised exception: %r', exc, exc_info=1) def handle_timeout(self, soft=False): if self._timeout_callback is not None: self.safe_apply_callback( self._timeout_callback, soft=soft, timeout=self._soft_timeout if soft else self._timeout, ) def _set(self, i, obj): with self._mutex: if self._on_timeout_cancel: self._on_timeout_cancel(self) self._success, self._value = obj self._event.set() if self._accepted: # if not accepted yet, then the set message # was received before the ack, which means # the ack will remove the entry. self._cache.pop(self._job, None) # apply callbacks last if self._callback and self._success: self.safe_apply_callback( self._callback, self._value) if (self._value is not None and self._error_callback and not self._success): self.safe_apply_callback( self._error_callback, self._value) def _ack(self, i, time_accepted, pid, synqW_fd): with self._mutex: if self._cancelled and self._send_ack: self._accepted = True if synqW_fd: return self._send_ack(NACK, pid, self._job, synqW_fd) return self._accepted = True self._time_accepted = time_accepted self._worker_pid = pid if self.ready(): # ack received after set() self._cache.pop(self._job, None) if self._on_timeout_set: self._on_timeout_set(self, self._soft_timeout, self._timeout) response = ACK if self._accept_callback: try: self._accept_callback(pid, time_accepted) except self._propagate_errors: response = NACK raise except Exception: response = NACK # ignore other errors finally: if self._send_ack and synqW_fd: return self._send_ack( response, pid, self._job, synqW_fd ) if self._send_ack and synqW_fd: self._send_ack(response, pid, self._job, synqW_fd) # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, cache, chunksize, length, callback, error_callback): ApplyResult.__init__( self, cache, callback, error_callback=error_callback, ) self._success = True self._length = length self._value = [None] * length self._accepted = [False] * length self._worker_pid = [None] * length self._time_accepted = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del cache[self._job] else: self._number_left = length // chunksize + bool(length % chunksize) def _set(self, i, success_result): success, result = success_result if success: self._value[i * self._chunksize:(i + 1) * self._chunksize] = result self._number_left -= 1 if self._number_left == 0: if self._callback: self._callback(self._value) if self._accepted: self._cache.pop(self._job, None) self._event.set() else: self._success = False self._value = result if self._error_callback: self._error_callback(self._value) if self._accepted: self._cache.pop(self._job, None) self._event.set() def _ack(self, i, time_accepted, pid, *args): start = i * self._chunksize stop = min((i + 1) * self._chunksize, self._length) for j in range(start, stop): self._accepted[j] = True self._worker_pid[j] = pid self._time_accepted[j] = time_accepted if self.ready(): self._cache.pop(self._job, None) def accepted(self): return all(self._accepted) def worker_pids(self): return [pid for pid in 
self._worker_pid if pid] # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): _worker_lost = None def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT): self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = cache self._items = deque() self._index = 0 self._length = None self._ready = False self._unsorted = {} self._worker_pids = [] self._lost_worker_timeout = lost_worker_timeout cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): with self._cond: try: item = self._items.popleft() except IndexError: if self._index == self._length: self._ready = True raise StopIteration self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: self._ready = True raise StopIteration raise TimeoutError success, value = item if success: return value raise Exception(value) __next__ = next # XXX def _set(self, i, obj): with self._cond: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: self._ready = True del self._cache[self._job] def _set_length(self, length): with self._cond: self._length = length if self._index == self._length: self._ready = True self._cond.notify() del self._cache[self._job] def _ack(self, i, time_accepted, pid, *args): self._worker_pids.append(pid) def ready(self): return self._ready def worker_pids(self): return self._worker_pids # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): with self._cond: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: self._ready = True del self._cache[self._job] # # # class ThreadPool(Pool): from .dummy import Process as DummyProcess Process = DummyProcess def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = Queue() self._outqueue = Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _poll_result(timeout): try: return True, self._quick_get(timeout=timeout) except Empty: return False, None self._poll_result = _poll_result @staticmethod def _help_stuff_finish(inqueue, task_handler, pool): # put sentinels at head of inqueue to make workers finish with inqueue.not_empty: inqueue.queue.clear() inqueue.queue.extend([None] * len(pool)) inqueue.not_empty.notify_all() billiard-3.5.0.3/billiard/popen_spawn_posix.py0000644000175000017500000000367513132743245021316 0ustar omeromer00000000000000from __future__ import absolute_import import io import os from . import context from . import popen_fork from . import reduction from . import spawn from .compat import spawnv_passfds __all__ = ['Popen'] # # Wrapper for an fd used while launching a process # class _DupFd(object): def __init__(self, fd): self.fd = fd def detach(self): return self.fd # # Start child process using a fresh interpreter # class Popen(popen_fork.Popen): method = 'spawn' DupFd = _DupFd def __init__(self, process_obj): self._fds = [] super(Popen, self).__init__(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return fd def _launch(self, process_obj): os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1" spawn._Django_old_layout_hack__save() from . 
import semaphore_tracker tracker_fd = semaphore_tracker.getfd() self._fds.append(tracker_fd) prep_data = spawn.get_preparation_data(process_obj._name) fp = io.BytesIO() context.set_spawning_popen(self) try: reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: context.set_spawning_popen(None) parent_r = child_w = child_r = parent_w = None try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() cmd = spawn.get_command_line(tracker_fd=tracker_fd, pipe_handle=child_r) self._fds.extend([child_r, child_w]) self.pid = spawnv_passfds( spawn.get_executable(), cmd, self._fds, ) self.sentinel = parent_r with io.open(parent_w, 'wb', closefd=False) as f: f.write(fp.getvalue()) finally: for fd in (child_r, child_w, parent_w): if fd is not None: os.close(fd) billiard-3.5.0.3/CHANGES.txt0000644000175000017500000004661613132746064015224 0ustar omeromer000000000000003.5.0.3 - 2017-07-16 -------------------- - Adds Process._authkey alias to .authkey for 2.7 compat. - Remove superfluous else clause from max_memory_per_child_check. - Document and test all supported Python versions. - Extend 'Process' to be compatible with < Py3.5. - Use a properly initialized logger in pool.py error logging. - _trywaitkill can now kill a whole process group if the worker process declares itself as a group leader. - Fix cpython issue 14881 (See http://bugs.python.org/issue14881). - Fix for a crash on windows. - Fix messaging in case of worker exceeds max memory. 3.5.0.2 - 2016-10-03 -------------------- - max_memory_per_child was measured in kilobytes on Linux, but bytes on *BSD/MacOS, it's now always kilobytes. - Windows: Adds support for max_memory_per_child, but requires the ``psutil`` package to be installed. - Fixed bug in ForkingPickler.loadbuf, where it tried to pass a BytesIO instance directly to ``pickle.loads`` on Python 2.7. 3.5.0.1 - 2016-09-07 -------------------- - Connection: Properly handle EINTR (Issue #191). - Fixed bug with missing CreateProcess for Windows on Python 2.7. - Adds Process._counter for compatibility with Python <3.5. 3.5.0.0 - 2016-07-28 -------------------- - No longer supports Python 2.6 You need Python 2.7 or later to use this version of billiard. - Merged changes from CPython 3.5 3.3.0.20 - 2015-04-17 --------------------- - Pool: Timeouts will attempt to send SIGKILL, but this signal does not exist on Windows. Replaced with SIGTERM. 3.3.0.19 - 2014-10-13 --------------------- - Pool: Exceptions in user timeout callbacks are now logged instead of crashing the pool. Contributed by Pierre Fersing. - Pool: Exit codes in errors were improperly being represented as signals. - Pool: ``.map``. and ``.imap`` now working again. - Now builds on FreeBSD 10. Contributed by Michael Fladischer. 3.3.0.18 - 2014-06-20 --------------------- - Now compiles on GNU/kFreeBSD Contributed by Michael Fladischer. - Pool: `AF_PIPE` address fixed so that it works on recent Windows versions in combination with Python 2.7.7. Fix contributed by Joshua Tacoma. - Pool: Fix for `Supervisor object has no attribute _children` error. Fix contributed by Andres Riancho. - Pool: Fixed bug with human_status(None). - Pool: shrink did not work properly if asked to remove more than 1 process. 3.3.0.17 - 2014-04-16 --------------------- - Fixes SemLock on Python 3.4 (Issue #107) when using ``forking_enable(False)``. - Pool: Include more useful exitcode information when processes exit. 
3.3.0.16 - 2014-02-11 --------------------- - Previous release was missing the billiard.py3 package from MANIFEST so the installation would not work on Python 3. 3.3.0.15 - 2014-02-10 --------------------- - Pool: Fixed "cannot join process not started" error. - Now uses billiard.py2 and billiard.py3 specific packages that are installed depending on the python version used. This way the installation will not import version specific modules (and possibly crash). 3.3.0.14 - 2014-01-17 --------------------- - Fixed problem with our backwards compatible ``bytes`` wrapper (Issue #103). - No longer expects frozen applications to have a valid ``__file__`` attribute. Fix contributed by George Sibble. 3.3.0.13 - 2013-12-13 --------------------- - Fixes compatibility with Python < 2.7.6 - No longer attempts to handle ``SIGBUS`` Contributed by Vishal Vatsa. - Non-thread based pool now only handles signals: ``SIGHUP``, ``SIGQUIT``, ``SIGTERM``, ``SIGUSR1``, ``SIGUSR2``. - setup.py: Only show compilation warning for build related commands. 3.3.0.12 - 2013-12-09 --------------------- - Fixed installation for Python 3. Contributed by Rickert Mulder. - Pool: Fixed bug with maxtasksperchild. Fix contributed by Ionel Cristian Maries. - Pool: Fixed bug in maintain_pool. 3.3.0.11 - 2013-12-03 --------------------- - Fixed Unicode error when installing the distribution (Issue #89). - Daemonic processes are now allowed to have children. But note that it will not be possible to automatically terminate them when the process exits. See discussion at https://github.com/celery/celery/issues/1709 - Pool: Would not always be able to detect that a process exited. 3.3.0.10 - 2013-12-02 --------------------- - Windows: Fixed problem with missing ``WAITABANDONED_0`` Fix contributed by Matthias Wagner - Windows: PipeConnection can now be inherited. Fix contributed by Matthias Wagner 3.3.0.9 - 2013-12-02 -------------------- - Temporary workaround for Celery maxtasksperchild issue. Fix contributed by Ionel Cristian Maries. 3.3.0.8 - 2013-11-21 -------------------- - Now also sets ``multiprocessing.current_process`` for compatibility with logging's ``processName`` field. 3.3.0.7 - 2013-11-15 -------------------- - Fixed compatibility with PyPy 2.1 + 2.2. - Fixed problem in pypy detection. Fix contributed by Tin Tvrtkovic. - Now uses ``ctypes.find_library`` instead of hardcoded path to find the macOS CoreServices framework. Fix contributed by Moritz Kassner. 3.3.0.6 - 2013-11-12 -------------------- - Now works without C extension again. - New ``_billiard.read(fd, buffer, [len, ])`` function implements os.read with buffer support (new buffer API) - New pure-python implementation of ``Connection.send_offset``. 3.3.0.5 - 2013-11-11 -------------------- - All platforms except for Windows/PyPy/Jython now require the C extension. 3.3.0.4 - 2013-11-11 -------------------- - Fixed problem with Python3 and setblocking. 3.3.0.3 - 2013-11-09 -------------------- - Now works on Windows again. 3.3.0.2 - 2013-11-08 -------------------- - ApplyResult.terminate() may be set to signify that the job must not be executed. It can be used in combination with Pool.terminate_job. - Pipe/_SimpleQueue: Now supports rnonblock/wnonblock arguments to set the read or write end of the pipe to be nonblocking. - Pool: Log message included exception info but exception happened in another process so the resulting traceback was wrong.
- Pool: Worker process can now prepare results before they are sent back to the main process (using ``Worker.prepare_result``). 3.3.0.1 - 2013-11-04 -------------------- - Pool: New ``correlation_id`` argument to ``apply_async`` can be used to set a related id for the ``ApplyResult`` object returned: >>> r = pool.apply_async(target, args, kwargs, correlation_id='foo') >>> r.correlation_id 'foo' - Pool: New callback `on_process_exit` is called when a pool process exits, with signature ``(pid, exitcode)``. Contributed by Daniel M. Taub. - Pool: Improved the too many restarts detection. 3.3.0.0 - 2013-10-14 -------------------- - Dual code base now runs on Python 2.6+ and Python 3. - No longer compatible with Python 2.5 - Includes many changes from multiprocessing in 3.4. - Now uses ``time.monotonic`` when available, also including fallback implementations for Linux and macOS. - No longer cleans up after receiving SIGILL, SIGSEGV or SIGFPE Contributed by Kevin Blackham - ``Finalize`` and ``register_after_fork`` are now aliases to multiprocessing. It's better to import these from multiprocessing directly now so that there aren't multiple registries. - New `billiard.queues._SimpleQueue` that does not use semaphores. - Pool: Can now be extended to support using multiple IPC queues. - Pool: Can now use async I/O to write to pool IPC queues. - Pool: New ``Worker.on_loop_stop`` handler can be used to add actions at pool worker process shutdown. Note that, like all finalization handlers, there is no guarantee that this will be executed. Contributed by dmtaub. 2.7.3.30 - 2013-06-28 --------------------- - Fixed ImportError in billiard._ext 2.7.3.29 - 2013-06-28 --------------------- - Compilation: Fixed improper handling of HAVE_SEM_OPEN (Issue #55) Fix contributed by Krzysztof Jagiello. - Process now releases logging locks after fork. This previously happened in Pool, but it was done too late as processes log when they bootstrap. - Pool.terminate_job now ignores `No such process` errors. - billiard.Pool entrypoint did not support new arguments to billiard.pool.Pool - Connection inbound buffer size increased from 1kb to 128kb. - C extension cleaned up by properly adding a namespace to symbols. - _exit_function now works even if thread wakes up after gc collect. 2.7.3.28 - 2013-04-16 --------------------- - Pool: Fixed regression that disabled the deadlock fix in 2.7.3.24 - Pool: RestartFreqExceeded could be raised prematurely. - Process: Include pid in startup and process INFO logs. 2.7.3.27 - 2013-04-12 --------------------- - Manager now works again. - Python 3 fixes for billiard.connection. - Fixed invalid argument bug when running on Python 3.3 Fix contributed by Nathan Wan. - Ignore OSError when setting up signal handlers. 2.7.3.26 - 2013-04-09 --------------------- - Pool: Child processes must ignore SIGINT. 2.7.3.25 - 2013-04-09 --------------------- - Pool: 2.7.3.24 broke support for subprocesses (Issue #48). Signals that should be ignored were instead handled by terminating. 2.7.3.24 - 2013-04-08 --------------------- - Pool: Make sure finally blocks are called when process exits due to a signal. This fixes a deadlock problem when the process is killed while having acquired the shared semaphore. However, this solution does not protect against the processes being killed; a more elaborate solution is required for that. Hopefully this will be fixed soon in a later version. - Pool: Can now use GDB to debug pool child processes. - Fixes Python 3 compatibility problems.
Contributed by Albertas Agejevas. 2.7.3.23 - 2013-03-22 --------------------- - Windows: Now catches SystemExit from setuptools while trying to build the C extension (Issue #41). 2.7.3.22 - 2013-03-08 --------------------- - Pool: apply_async now supports a ``callbacks_propagate`` keyword argument that can be a tuple of exceptions to propagate in callbacks. (callback, errback, accept_callback, timeout_callback). - Errors are no longer logged for OK and recycle exit codes. This would cause normal maxtasksperchild recycled process to log an error. - Fixed Python 2.5 compatibility problem (Issue #33). - FreeBSD: Compilation now disables semaphores if Python was built without it (Issue #40). Contributed by William Grzybowski 2.7.3.21 - 2013-02-11 --------------------- - Fixed typo EX_REUSE -> EX_RECYCLE - Code now conforms to new pep8.py rules. 2.7.3.20 - 2013-02-08 --------------------- - Pool: Disable restart limit if maxR is not set. - Pool: Now uses os.kill instead of signal.signal. Contributed by Lukasz Langa - Fixed name error in process.py - Pool: ApplyResult.get now properly raises exceptions. Fix contributed by xentac. 2.7.3.19 - 2012-11-30 --------------------- - Fixes problem at shutdown when gc has collected symbols. - Pool now always uses _kill for Py2.5 compatibility on Windows (Issue #32). - Fixes Python 3 compatibility issues 2.7.3.18 - 2012-11-05 --------------------- - [Pool] Fix for check_timeouts if not set. Fix contributed by Dmitry Sukhov - Fixed pickle problem with Traceback. Code.frame.__loader__ is now ignored as it may be set to an unpickleable object. - The Django old-layout warning was always showing. 2.7.3.17 - 2012-09-26 --------------------- - Fixes typo 2.7.3.16 - 2012-09-26 --------------------- - Windows: Fixes for SemLock._rebuild (Issue #24). - Pool: Job terminated with terminate_job now raises billiard.exceptions.Terminated. 2.7.3.15 - 2012-09-21 --------------------- - Windows: Fixes unpickling of SemLock when using fallback. - Windows: Fixes installation when no C compiler. 2.7.3.14 - 2012-09-20 --------------------- - Installation now works again for Python 3. 2.7.3.13 - 2012-09-14 --------------------- - Merged with Python trunk (many authors, many fixes: see Python changelog in trunk). - Using execv now also works with older Django projects using setup_environ (Issue #10). - Billiard now installs with a warning that the C extension could not be built if a compiler is not installed or the build fails in some other way. It really is recommended to have the C extension installed when running with force execv, but this change also makes it easier to install. - Pool: Hard timeouts now sends KILL shortly after TERM so that C extensions cannot block the signal. Python signal handlers are called in the interpreter, so they cannot be called while a C extension is blocking the interpreter from running. - Now uses a timeout value for Thread.join that doesn't exceed the maximum on some platforms. - Fixed bug in the SemLock fallback used when C extensions not installed. Fix contributed by Mher Movsisyan. - Pool: Now sets a Process.index attribute for every process in the pool. This number will always be between 0 and concurrency-1, and can be used to e.g. create a logfile for each process in the pool without creating a new logfile whenever a process is replaced. 2.7.3.12 - 2012-08-05 --------------------- - Fixed Python 2.5 compatibility issue. 
- New Pool.terminate_job(pid) to terminate a job without raising WorkerLostError 2.7.3.11 - 2012-08-01 --------------------- - Adds support for FreeBSD 7+ Fix contributed by koobs. - Pool: New argument ``allow_restart`` is now required to enable the pool process sentinel that is required to restart the pool. It's disabled by default, which reduces the number of file descriptors/semaphores required to run the pool. - Pool: Now emits a warning if a worker process exited with error-code. But not if the error code is 155, which is now returned if the worker process was recycled (maxtasksperchild). - Python 3 compatibility fixes. - Python 2.5 compatibility fixes. 2.7.3.10 - 2012-06-26 --------------------- - The ``TimeLimitExceeded`` exception string representation only included the seconds as a number, it now gives a more human friendly description. - Fixed typo in ``LaxBoundedSemaphore.shrink``. - Pool: ``ResultHandler.handle_event`` no longer requires any arguments. - setup.py bdist now works 2.7.3.9 - 2012-06-03 -------------------- - Environment variable ``MP_MAIN_FILE`` envvar is now set to the path of the ``__main__`` module when execv is enabled. - Pool: Errors occurring in the TaskHandler are now reported. 2.7.3.8 - 2012-06-01 -------------------- - Can now be installed on Py 3.2 - Issue #12091: simplify ApplyResult and MapResult with threading.Event Patch by Charles-Francois Natali - Pool: Support running without TimeoutHandler thread. - The with_*_thread arguments has also been replaced with a single `threads=True` argument. - Two new pool callbacks: - ``on_timeout_set(job, soft, hard)`` Applied when a task is executed with a timeout. - ``on_timeout_cancel(job)`` Applied when a timeout is cancelled (the job completed) 2.7.3.7 - 2012-05-21 -------------------- - Fixes Python 2.5 support. 2.7.3.6 - 2012-05-21 -------------------- - Pool: Can now be used in an event loop, without starting the supporting threads (TimeoutHandler still not supported) To facilitate this the pool has gained the following keyword arguments: - ``with_task_thread`` - ``with_result_thread`` - ``with_supervisor_thread`` - ``on_process_up`` Callback called with Process instance as argument whenever a new worker process is added. Used to add new process fds to the eventloop:: def on_process_up(proc): hub.add_reader(proc.sentinel, pool.maintain_pool) - ``on_process_down`` Callback called with Process instance as argument whenever a new worker process is found dead. Used to remove process fds from the eventloop:: def on_process_down(proc): hub.remove(proc.sentinel) - ``semaphore`` Sets the semaphore used to protect from adding new items to the pool when no processes available. The default is a threaded one, so this can be used to change to an async semaphore. And the following attributes:: - ``readers`` A map of ``fd`` -> ``callback``, to be registered in an eventloop. Currently this is only the result outqueue with a callback that processes all currently incoming results. And the following methods:: - ``did_start_ok`` To be called after starting the pool, and after setting up the eventloop with the pool fds, to ensure that the worker processes didn't immediately exit caused by an error (internal/memory). - ``maintain_pool`` Public version of ``_maintain_pool`` that handles max restarts. - Pool: Process too frequent restart protection now only counts if the process had a non-successful exit-code. This to take into account the maxtasksperchild option, and allowing processes to exit cleanly on their own. 
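  In practice this plays together with ``maxtasksperchild``: workers that
  recycle themselves exit cleanly on purpose, and those exits are no longer
  treated as crashes by the restart protection. A minimal sketch, assuming
  only the regular billiard Pool API::

      import billiard

      def square(x):
          return x * x

      if __name__ == '__main__':
          # Every worker voluntarily exits after 10 tasks and is replaced;
          # these clean exits do not count toward the restart limit.
          pool = billiard.Pool(3, maxtasksperchild=10)
          print(pool.map(square, range(100)))
          pool.close()
          pool.join()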
- Pool: New options max_restart + max_restart_freq This means that the supervisor can't restart processes faster than max_restart' times per max_restart_freq seconds (like the Erlang supervisor maxR & maxT settings). The pool is closed and joined if the max restart frequency is exceeded, where previously it would keep restarting at an unlimited rate, possibly crashing the system. The current default value is to stop if it exceeds 100 * process_count restarts in 1 seconds. This may change later. It will only count processes with an unsuccessful exit code, this is to take into account the ``maxtasksperchild`` setting and code that voluntarily exits. - Pool: The ``WorkerLostError`` message now includes the exit-code of the process that disappeared. 2.7.3.5 - 2012-05-09 -------------------- - Now always cleans up after ``sys.exc_info()`` to avoid cyclic references. - ExceptionInfo without arguments now defaults to ``sys.exc_info``. - Forking can now be disabled using the ``MULTIPROCESSING_FORKING_DISABLE`` environment variable. Also this envvar is set so that the behavior is inherited after execv. - The semaphore cleanup process started when execv is used now sets a useful process name if the ``setproctitle`` module is installed. - Sets the ``FORKED_BY_MULTIPROCESSING`` environment variable if forking is disabled. 2.7.3.4 - 2012-04-27 -------------------- - Added `billiard.ensure_multiprocessing()` Raises NotImplementedError if the platform does not support multiprocessing (e.g. Jython). 2.7.3.3 - 2012-04-23 -------------------- - PyPy now falls back to using its internal _multiprocessing module, so everything works except for forking_enable(False) (which silently degrades). - Fixed Python 2.5 compat. issues. - Uses more with statements - Merged some of the changes from the Python 3 branch. 2.7.3.2 - 2012-04-20 -------------------- - Now installs on PyPy/Jython (but does not work). 2.7.3.1 - 2012-04-20 -------------------- - Python 2.5 support added. 2.7.3.0 - 2012-04-20 -------------------- - Updated from Python 2.7.3 - Python 2.4 support removed, now only supports 2.5, 2.6 and 2.7. (may consider py3k support at some point). - Pool improvements from Celery. - no-execv patch added (http://bugs.python.org/issue8713) billiard-3.5.0.3/t/0000755000175000017500000000000013132746522013640 5ustar omeromer00000000000000billiard-3.5.0.3/t/unit/0000755000175000017500000000000013132746522014617 5ustar omeromer00000000000000billiard-3.5.0.3/t/unit/test_package.py0000644000175000017500000000024313132743245017621 0ustar omeromer00000000000000from __future__ import absolute_import import billiard def test_has_version(): assert billiard.__version__ assert isinstance(billiard.__version__, str) billiard-3.5.0.3/t/unit/__init__.py0000644000175000017500000000103013132743245016721 0ustar omeromer00000000000000from __future__ import absolute_import import atexit def teardown(): # Workaround for multiprocessing bug where logging # is attempted after global already collected at shutdown. 
cancelled = set() try: import multiprocessing.util cancelled.add(multiprocessing.util._exit_function) except (AttributeError, ImportError): pass try: atexit._exithandlers[:] = [ e for e in atexit._exithandlers if e[0] not in cancelled ] except AttributeError: pass billiard-3.5.0.3/t/unit/test_dummy.py0000644000175000017500000000103313132743245017357 0ustar omeromer00000000000000from __future__ import absolute_import import threading import billiard.dummy class test_restart_state: def test_raises(self): class Thread(threading.Thread): exception = None def run(self): try: billiard.dummy.Process().start() except BaseException as e: self.exception = e thread = Thread() thread.start() thread.join(0.1) assert not thread.is_alive() assert thread.exception is None billiard-3.5.0.3/t/unit/test_pool.py0000644000175000017500000000034713132743245017204 0ustar omeromer00000000000000from __future__ import absolute_import import billiard.pool class test_pool: def test_raises(self): pool = billiard.pool.Pool() assert pool.did_start_ok() is True pool.close() pool.terminate() billiard-3.5.0.3/t/unit/test_win32.py0000644000175000017500000000335313132743245017175 0ustar omeromer00000000000000from __future__ import absolute_import import pytest from case import skip from billiard.compat import _winapi @skip.unless_win32() class test_win32_module: @pytest.mark.parametrize('name', [ 'NULL', 'ERROR_ALREADY_EXISTS', 'ERROR_PIPE_BUSY', 'ERROR_PIPE_CONNECTED', 'ERROR_SEM_TIMEOUT', 'ERROR_MORE_DATA', 'ERROR_BROKEN_PIPE', 'ERROR_IO_PENDING', 'ERROR_NETNAME_DELETED', 'GENERIC_READ', 'GENERIC_WRITE', 'DUPLICATE_SAME_ACCESS', 'DUPLICATE_CLOSE_SOURCE', 'INFINITE', 'NMPWAIT_WAIT_FOREVER', 'OPEN_EXISTING', 'PIPE_ACCESS_DUPLEX', 'PIPE_ACCESS_INBOUND', 'PIPE_READMODE_MESSAGE', 'PIPE_TYPE_MESSAGE', 'PIPE_UNLIMITED_INSTANCES', 'PIPE_WAIT', 'PROCESS_ALL_ACCESS', 'PROCESS_DUP_HANDLE', 'WAIT_OBJECT_0', 'WAIT_ABANDONED_0', 'WAIT_TIMEOUT', 'FILE_FLAG_FIRST_PIPE_INSTANCE', 'FILE_FLAG_OVERLAPPED', ]) def test_constants(self, name): assert getattr(_winapi, name) is not None @pytest.mark.parametrize('name', [ 'Overlapped', 'CloseHandle', 'GetLastError', 'OpenProcess', 'ExitProcess', 'ConnectNamedPipe', 'CreateFile', 'WriteFile', 'ReadFile', 'CreateNamedPipe', 'SetNamedPipeHandleState', 'WaitNamedPipe', 'PeekNamedPipe', 'WaitForMultipleObjects', 'WaitForSingleObject', 'GetCurrentProcess', 'GetExitCodeProcess', 'TerminateProcess', 'DuplicateHandle', 'CreatePipe', ]) def test_functions(self, name): assert getattr(_winapi, name) billiard-3.5.0.3/t/unit/test_common.py0000644000175000017500000000602513132743245017522 0ustar omeromer00000000000000from __future__ import absolute_import import os import pytest import signal from contextlib import contextmanager from time import time from case import Mock, call, patch, skip from billiard.common import ( _shutdown_cleanup, reset_signals, restart_state, ) def signo(name): return getattr(signal, name) @contextmanager def termsigs(default, full): from billiard import common prev_def, common.TERMSIGS_DEFAULT = common.TERMSIGS_DEFAULT, default prev_full, common.TERMSIGS_FULL = common.TERMSIGS_FULL, full try: yield finally: common.TERMSIGS_DEFAULT, common.TERMSIGS_FULL = prev_def, prev_full @skip.if_win32() class test_reset_signals: def test_shutdown_handler(self): with patch('sys.exit') as exit: _shutdown_cleanup(15, Mock()) exit.assert_called() assert os.WTERMSIG(exit.call_args[0][0]) == 15 def test_does_not_reset_ignored_signal(self, sigs=['SIGTERM']): with self.assert_context(sigs, [], 
signal.SIG_IGN) as (_, SET): SET.assert_not_called() def test_does_not_reset_if_current_is_None(self, sigs=['SIGTERM']): with self.assert_context(sigs, [], None) as (_, SET): SET.assert_not_called() def test_resets_for_SIG_DFL(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): with self.assert_context(sigs, [], signal.SIG_DFL) as (_, SET): SET.assert_has_calls([ call(signo(sig), _shutdown_cleanup) for sig in sigs ]) def test_resets_for_obj(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): with self.assert_context(sigs, [], object()) as (_, SET): SET.assert_has_calls([ call(signo(sig), _shutdown_cleanup) for sig in sigs ]) def test_handles_errors(self, sigs=['SIGTERM']): for exc in (OSError(), AttributeError(), ValueError(), RuntimeError()): with self.assert_context(sigs, [], signal.SIG_DFL, exc) as (_, S): S.assert_called() @contextmanager def assert_context(self, default, full, get_returns=None, set_effect=None): with termsigs(default, full): with patch('signal.getsignal') as GET: with patch('signal.signal') as SET: GET.return_value = get_returns SET.side_effect = set_effect reset_signals() GET.assert_has_calls([ call(signo(sig)) for sig in default ]) yield GET, SET class test_restart_state: def test_raises(self): s = restart_state(100, 1) # max 100 restarts in 1 second. s.R = 99 s.step() with pytest.raises(s.RestartFreqExceeded): s.step() def test_time_passed_resets_counter(self): s = restart_state(100, 10) s.R, s.T = 100, time() with pytest.raises(s.RestartFreqExceeded): s.step() s.R, s.T = 100, time() s.step(time() + 20) assert s.R == 1 billiard-3.5.0.3/t/__init__.py0000644000175000017500000000000013132743245015736 0ustar omeromer00000000000000billiard-3.5.0.3/t/integration/0000755000175000017500000000000013132746522016163 5ustar omeromer00000000000000billiard-3.5.0.3/t/integration/__init__.py0000644000175000017500000000012413132743245020270 0ustar omeromer00000000000000import os import sys sys.path.insert(0, os.pardir) sys.path.insert(0, os.getcwd()) billiard-3.5.0.3/t/integration/setup.py0000644000175000017500000000265113132743245017700 0ustar omeromer00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- try: from setuptools import setup from setuptools.command.install import install except ImportError: from ez_setup import use_setuptools use_setuptools() from setuptools import setup # noqa from setuptools.command.install import install # noqa class no_install(install): def run(self, *args, **kwargs): import sys sys.stderr.write(""" ------------------------------------------------------- The billiard functional test suite cannot be installed. 
------------------------------------------------------- But you can execute the tests by running the command: $ python setup.py test """) setup( name='billiard-funtests', version='DEV', description='Functional test suite for billiard', author='Ask Solem', author_email='ask@celeryproject.org', url='http://github.com/celery/billiard', platforms=['any'], packages=[], data_files=[], zip_safe=False, cmdclass={'install': no_install}, test_suite='nose.collector', build_requires=[ 'nose', 'unittest2', 'coverage>=3.0', ], classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: C', 'License :: OSI Approved :: BSD License', 'Intended Audience :: Developers', ], long_description='Do not install this package', ) billiard-3.5.0.3/t/integration/tests/0000755000175000017500000000000013132746522017325 5ustar omeromer00000000000000billiard-3.5.0.3/t/integration/tests/__init__.py0000644000175000017500000000022513132743245021434 0ustar omeromer00000000000000import os import sys sys.path.insert(0, os.path.join(os.getcwd(), os.pardir)) print(sys.path[0]) sys.path.insert(0, os.getcwd()) print(sys.path[0]) billiard-3.5.0.3/t/integration/tests/test_multiprocessing.py0000644000175000017500000016704413132743245024170 0ustar omeromer00000000000000#!/usr/bin/env python from __future__ import absolute_import # # Unit tests for the multiprocessing package # import unittest import Queue import time import sys import os import gc import array import random import logging from nose import SkipTest from test import test_support from StringIO import StringIO try: from billiard._ext import _billiard except ImportError as exc: raise SkipTest(exc) # import threading after _billiard to raise a more relevant error # message: "No module named _billiard". _billiard is not compiled # without thread support.
import threading # Work around broken sem_open implementations try: import billiard.synchronize except ImportError as exc: raise SkipTest(exc) import billiard.dummy import billiard.connection import billiard.managers import billiard.heap import billiard.pool from billiard import util from billiard.compat import bytes latin = str # Constants LOG_LEVEL = util.SUBWARNING DELTA = 0.1 # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected CHECK_TIMINGS = False if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_billiard, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") # Some tests require ctypes try: from ctypes import Structure, c_int, c_double except ImportError: Structure = object c_int = c_double = None try: from ctypes import Value except ImportError: Value = None try: from ctypes import copy as ctypes_copy except ImportError: ctypes_copy = None class TimingWrapper(object): """Creates a wrapper for a function which records the time it takes to finish""" def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.time() try: return self.func(*args, **kwds) finally: self.elapsed = time.time() - t class BaseTestCase(object): """Base class for test cases""" ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) def get_value(self): """Return the value of a semaphore""" try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError class _TestProcesses(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': return current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def _test(self, q, *args, **kwds): current = self.current_process() q.put(args) q.put(kwds) q.put(current.name) if self.TYPE != 'threads': q.put(bytes(current.authkey, 'ascii')) q.put(current.pid) def test_process(self): q = self.Queue(1) e = self.Event() # noqa args = (q, 1, 2) kwargs = {'hello': 23, 'bye': 2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEquals(p.authkey, current.authkey) self.assertEquals(p.is_alive(), False) self.assertEquals(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEquals(p.exitcode, None) self.assertEquals(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEquals(q.get(), args[1:]) self.assertEquals(q.get(), kwargs) self.assertEquals(q.get(), p.name) if self.TYPE != 'threads': self.assertEquals(q.get(), current.authkey) self.assertEquals(q.get(), p.pid) p.join() self.assertEquals(p.exitcode, 0) self.assertEquals(p.is_alive(), False) 
self.assertNotIn(p, self.active_children()) def _test_terminate(self): time.sleep(1000) def test_terminate(self): if self.TYPE == 'threads': return p = self.Process(target=self._test_terminate) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) p.terminate() join = TimingWrapper(p.join) self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() # XXX sometimes get p.exitcode == 0 on Windows ... # self.assertEqual(p.exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = billiard.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) def _test_recursion(self, wconn, id): __import__('billiard.forking') wconn.send(id) if len(id) < 2: for i in range(2): p = self.Process( target=self._test_recursion, args=(wconn, id + [i]) ) p.start() p.join() def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) class _UpperCaser(billiard.Process): def __init__(self): billiard.Process.__init__(self) self.child_conn, self.parent_conn = billiard.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.start() self.assertEqual(uppercaser.submit('hello'), 'HELLO') self.assertEqual(uppercaser.submit('world'), 'WORLD') uppercaser.stop() uppercaser.join() def queue_empty(q): if hasattr(q, 'empty'): return q.empty() else: return q.qsize() == 0 def queue_full(q, maxsize): if hasattr(q, 'full'): return q.full() else: return q.qsize() == maxsize class _TestQueue(BaseTestCase): def _test_put(self, queue, child_can_start, parent_can_continue): child_can_start.wait() for i in range(6): queue.get() parent_can_continue.set() def test_put(self): MAXSIZE = 6 queue = self.Queue(maxsize=MAXSIZE) child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_put, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) queue.put(1) queue.put(2, True) queue.put(3, True, None) queue.put(4, False) queue.put(5, False, None) queue.put_nowait(6) # the values may be in buffer but not yet in pipe so sleep a bit time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) self.assertEqual(queue_full(queue, MAXSIZE), True) put = TimingWrapper(queue.put) put_nowait = TimingWrapper(queue.put_nowait) self.assertRaises(Queue.Full, put, 7, False) self.assertTimingAlmostEqual(put.elapsed, 0) 
self.assertRaises(Queue.Full, put, 7, False, None) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put_nowait, 7) self.assertTimingAlmostEqual(put_nowait.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1) self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2) self.assertTimingAlmostEqual(put.elapsed, 0) self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3) self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3) child_can_start.set() parent_can_continue.wait() self.assertEqual(queue_empty(queue), True) self.assertEqual(queue_full(queue, MAXSIZE), False) proc.join() def _test_get(self, queue, child_can_start, parent_can_continue): child_can_start.wait() queue.put(2) queue.put(3) queue.put(4) queue.put(5) parent_can_continue.set() def test_get(self): queue = self.Queue() child_can_start = self.Event() parent_can_continue = self.Event() proc = self.Process( target=self._test_get, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # ## Hangs unexpectedly, remove for now # self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(Queue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(Queue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(Queue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() def _test_fork(self, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. 
queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(Queue.Empty, queue.get, False) p.join() def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: return q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) def _test_task_done(self, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'): self.skipTest("requires 'queue.task_done()' method") workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in xrange(4)] for p in workers: p.start() for i in xrange(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() class _TestLock(BaseTestCase): def test_lock(self): lock = self.Lock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(False), False) self.assertEqual(lock.release(), None) self.assertRaises((ValueError, threading.ThreadError), lock.release) def test_rlock(self): lock = self.RLock() self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.acquire(), True) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertEqual(lock.release(), None) self.assertRaises((AssertionError, RuntimeError), lock.release) def test_lock_context(self): with self.Lock(): pass class _TestSemaphore(BaseTestCase): def _test_semaphore(self, sem): self.assertReturnsIfImplemented(2, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.acquire(), True) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.acquire(False), False) self.assertReturnsIfImplemented(0, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(1, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(2, get_value, sem) def test_semaphore(self): sem = self.Semaphore(2) self._test_semaphore(sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(3, get_value, sem) self.assertEqual(sem.release(), None) self.assertReturnsIfImplemented(4, get_value, sem) def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) # ## Currently fails on OS/X # if HAVE_GETVALUE: # self.assertRaises(ValueError, sem.release) # self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': return sem = self.Semaphore(0) acquire = TimingWrapper(sem.acquire) self.assertEqual(acquire(False), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, None), False) self.assertTimingAlmostEqual(acquire.elapsed, 0.0) self.assertEqual(acquire(False, TIMEOUT1), False) self.assertTimingAlmostEqual(acquire.elapsed, 0) self.assertEqual(acquire(True, TIMEOUT2), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2) self.assertEqual(acquire(timeout=TIMEOUT3), False) self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) class 
_TestCondition(BaseTestCase): def f(self, cond, sleeping, woken, timeout=None): cond.acquire() sleeping.release() cond.wait(timeout) woken.release() cond.release() def check_invariant(self, cond): # this is only supposed to succeed when there are no sleepers if self.TYPE == 'processes': try: sleepers = (cond._sleeping_count.get_value() - cond._woken_count.get_value()) self.assertEqual(sleepers, 0) self.assertEqual(cond._wait_semaphore.get_value(), 0) except NotImplementedError: pass def test_notify(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() p = threading.Thread(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() # wait for both children to start sleeping sleeping.acquire() sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() # wait for them all to sleep for i in xrange(6): sleeping.acquire() # check they have all timed out for i in xrange(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() # wait for them to all sleep for i in xrange(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() cond.notify_all() cond.release() # check they have all woken time.sleep(DELTA) self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) cond.acquire() res = wait(TIMEOUT1) cond.release() self.assertEqual(res, None) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) class _TestEvent(BaseTestCase): def _test_event(self, event): time.sleep(TIMEOUT2) event.set() def test_event(self): event = self.Event() wait = TimingWrapper(event.wait) # Removed temporaily, due to API shear, this does not # work with threading._Event objects. is_set == isSet self.assertEqual(event.is_set(), False) # Removed, threading.Event.wait() will return the value of the __flag # instead of None. 
API Shear with the semaphore backed mp.Event self.assertEqual(wait(0.0), False) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), False) self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1) event.set() # See note above on the API differences self.assertEqual(event.is_set(), True) self.assertEqual(wait(), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) self.assertEqual(wait(TIMEOUT1), True) self.assertTimingAlmostEqual(wait.elapsed, 0.0) # self.assertEqual(event.is_set(), True) event.clear() # self.assertEqual(event.is_set(), False) self.Process(target=self._test_event, args=(event,)).start() self.assertEqual(wait(), True) class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 3.625, -4.25), ('h', -232, 234), ('c', latin('x'), latin('y')) ] def _test(self, values): for sv, cv in zip(values, self.codes_values): sv.value = cv[2] @unittest.skipIf(c_int is None, "requires _ctypes") def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawvalue(self): self.test_value(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() # noqa obj1 = val1.get_obj() # noqa val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() # noqa obj2 = val2.get_obj() # noqa lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() # noqa obj3 = val3.get_obj() # noqa self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) def f(self, seq): for i in range(1, len(seq)): seq[i] += seq[i - 1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = self.Process(target=self.f, args=(arr,)) p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', range(10)) lock1 = arr1.get_lock() # noqa obj1 = arr1.get_obj() # noqa arr2 = self.Array('i', range(10), lock=None) lock2 = arr2.get_lock() # noqa obj2 = arr2.get_obj() # noqa lock = self.Lock() arr3 = self.Array('i', range(10), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() # noqa self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) 
self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(range(10)) self.assertEqual(a[:], range(10)) b = self.list() self.assertEqual(b[:], []) b.extend(range(5)) self.assertEqual(b[:], range(5)) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2, 3, 4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], range(10)) d = [a, b] e = self.list(d) self.assertEqual( e[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]) def test_dict(self): d = self.dict() indices = range(65, 70) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((j, chr(j)) for j in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(z) for z in indices]) self.assertEqual(sorted(d.items()), [(x, chr(x)) for x in indices]) def test_namespace(self): n = self.Namespace() n.name = 'Bob' n.job = 'Builder' n._hidden = 'hidden' self.assertEqual((n.name, n.job), ('Bob', 'Builder')) del n.job self.assertEqual(str(n), "Namespace(name='Bob')") self.assertTrue(hasattr(n, 'name')) self.assertTrue(not hasattr(n, 'job')) def sqr(x, wait=0.0): time.sleep(wait) return x * x class _TestPool(BaseTestCase): def test_apply(self): papply = self.pool.apply self.assertEqual(papply(sqr, (5,)), sqr(5)) self.assertEqual(papply(sqr, (), {'x': 3}), sqr(x=3)) def test_map(self): pmap = self.pool.map self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10))) self.assertEqual(pmap(sqr, range(100), chunksize=20), map(sqr, range(100))) def test_map_chunksize(self): try: self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1) except billiard.TimeoutError: self.fail("pool.map_async with chunksize stalled on null list") def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2)) get = TimingWrapper(res.get) self.assertRaises(billiard.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, range(10)) self.assertEqual(list(it), map(sqr, range(10))) it = self.pool.imap(sqr, range(10)) for i in range(10): self.assertEqual(it.next(), i * i) self.assertRaises(StopIteration, it.next) it = self.pool.imap(sqr, range(1000), chunksize=100) for i in range(1000): self.assertEqual(it.next(), i * i) self.assertRaises(StopIteration, it.next) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, range(1000)) self.assertEqual(sorted(it), map(sqr, range(1000))) it = self.pool.imap_unordered(sqr, range(1000), chunksize=53) self.assertEqual(sorted(it), map(sqr, range(1000))) def test_make_pool(self): p = billiard.Pool(3) self.assertEqual(3, len(p._pool)) p.close() p.join() def test_terminate(self): if self.TYPE == 'manager': # On Unix a forked process increfs each shared object to # which its parent process held a reference. If the # forked process gets terminated then there is likely to # be a reference leak. 
So to prevent # _TestZZZNumberOfObjects from failing we skip this test # when using a manager. return self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() self.assertTrue(join.elapsed < 0.2) class _TestPoolWorkerLifetime(BaseTestCase): ALLOWED_TYPES = ('processes', ) def test_pool_worker_lifetime(self): p = billiard.Pool(3, maxtasksperchild=10) self.assertEqual(3, len(p._pool)) origworkerpids = [w.pid for w in p._pool] # Run many tasks so each worker gets replaced (hopefully) results = [] for i in range(100): results.append(p.apply_async(sqr, (i, ))) # Fetch the results and verify we got the right answers, # also ensuring all the tasks have completed. for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive countdown = 5 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [worker.pid for worker in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() class _TestZZZNumberOfObjects(BaseTestCase): # Test that manager has expected number of shared objects left # Because test cases are sorted alphabetically, this one will get # run after all the other tests for the manager. It tests that # there have been no "reference leaks" for the manager's shared # objects. Note the comment in _TestPool.test_terminate(). ALLOWED_TYPES = ('manager',) def test_number_of_objects(self): EXPECTED_NUMBER = 1 # the pool object is still alive billiard.active_children() # discard dead process objs gc.collect() # do garbage collection refs = self.manager._number_of_objects() debug_info = self.manager._debug_info() if refs != EXPECTED_NUMBER: print(self.manager._debug_info()) print(debug_info) self.assertEqual(refs, EXPECTED_NUMBER) # Test of creating a customized manager class from billiard.managers import BaseManager, BaseProxy, RemoteError # noqa class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in xrange(10): yield i * i class IteratorProxy(BaseProxy): _exposed_ = ('next', '__next__') def __iter__(self): return self def next(self): return self._callmethod('next') def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i * i 
for i in range(10)]) manager.shutdown() _queue = Queue.Queue() # Test of connecting to a remote server and using xmlrpclib for serialization def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def _putter(self, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) manager.connect() queue = manager.get_queue() queue.put(('hello world', None, True, 2.25)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER ) manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple self.assertEqual(queue.get(), ['hello world', None, True, 2.25]) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue manager.shutdown() class _TestManagerRestart(BaseTestCase): def _putter(self, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=('localhost', 0), authkey=authkey, serializer=SERIALIZER) addr = manager.get_server().address manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) manager.start() manager.shutdown() SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def _echo(self, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', range(4)) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0] * 10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0] * 10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = 
conn.recv_bytes_into(buffer) except billiard.BufferTooShort as exc: self.assertEqual(exc.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(IOError, reader.send, 2) self.assertRaises(IOError, writer.recv) self.assertRaises(IOError, writer.poll) def test_spawn_close(self): # We test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': return msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7 + 8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def _test(self, address): conn = self.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() ''' class _TestPicklingConnections(BaseTestCase): """Test of sending connection and socket objects between processes""" ALLOWED_TYPES = ('processes',) def _listener(self, conn, families): for fam in families: l = self.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) if self.TYPE == 'processes': l = socket.socket() l.bind(('localhost', 0)) conn.send(l.getsockname()) l.listen(1) new_conn, addr 
= l.accept() conn.send(new_conn) conn.recv() def _remote(self, conn): for (address, msg) in iter(conn.recv, None): client = self.connection.Client(address) client.send(msg.upper()) client.close() if self.TYPE == 'processes': address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): try: billiard.allow_connection_pickling() except ImportError: return families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, families)) lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) if self.TYPE == 'processes': msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) if hasattr(socket, 'fromfd'): new_conn = lconn.recv() self.assertEqual(new_conn.recv(100), msg.upper()) else: # XXX On Windows with Py2.6 need to backport fromfd() discard = lconn.recv_bytes() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() ''' class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # create and destroy lots of blocks of different sizes for i in xrange(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = billiard.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] # get the heap object heap = billiard.heap.BufferWrapper._heap # verify the state of the heap all = [] occupied = 0 for L in heap._len_to_seq.values(): for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop - start, 'free')) for arena, start, stop in heap._allocated_blocks: all.append((heap._arenas.index(arena), start, stop, stop - start, 'occupied')) occupied += stop - start all.sort() for i in range(len(all) - 1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i + 1][:3] self.assertTrue((arena != narena and nstart == 0) or (stop == nstart)) class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes', ) def _double(self, x, y, foo, arr, string): x.value *= 2 y.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 @unittest.skipIf(Value is None, "requires ctypes.Value") def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0 / 3.0, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', range(10), lock=lock) string = self.Array('c', 20, lock=lock) string.value = 'hello' p = self.Process(target=self._double, args=(x, y, foo, arr, string)) p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0 / 3.0) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i * 2) self.assertEqual(string.value, latin('hellohello')) @unittest.skipIf(Value is None, "requires ctypes.Value") def test_synchronize(self): self.test_sharedctypes(lock=True) @unittest.skipIf(ctypes_copy is None, "requires ctypes.copy") def test_copy(self): foo = _Foo(2, 5.0) bar = ctypes_copy(foo) foo.x = 0 foo.y = 0 self.assertEqual(bar.x, 2) 
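        # ctypes_copy() must hand back an independent Structure: zeroing
        # foo's fields after the copy has to leave bar untouched, which the
        # assertion above (bar.x == 2) and the one below (bar.y == 5.0)
        # verify field by field.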
self.assertAlmostEqual(bar.y, 5.0) class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def _test_finalize(self, conn): class Foo(object): pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call mutliprocessing's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) class _TestImportStar(BaseTestCase): """Test that from ... import * works for each module""" ALLOWED_TYPES = ('processes',) def test_import(self): modules = [ 'billiard', 'billiard.connection', 'billiard.heap', 'billiard.managers', 'billiard.pool', 'billiard.process', 'billiard.reduction', 'billiard.synchronize', 'billiard.util' ] if c_int is not None: # This module requires _ctypes modules.append('billiard.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] for attr in getattr(mod, '__all__', ()): self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) class _TestLogging(BaseTestCase): """Quick test that logging works -- does not test logging output""" ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = billiard.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) def _test_level(self, conn): logger = billiard.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = billiard.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = billiard.Pipe(duplex=False) logger.setLevel(LEVEL1) self.Process(target=self._test_level, args=(writer,)).start() self.assertEqual(LEVEL1, reader.recv()) logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) self.Process(target=self._test_level, args=(writer,)).start() self.assertEqual(LEVEL2, reader.recv()) root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == billiard.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'billiard.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): 
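    # The C-level Connection type accepts an arbitrary integer as a
    # "handle"; invalidity only surfaces once I/O is attempted.  Roughly
    # what the (POSIX-only) test below drives:
    #
    #     conn = _billiard.Connection(44977608)   # bogus but accepted
    #     conn.poll()                             # -> IOError on first use
    #     _billiard.Connection(-1)                # -> IOError immediately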
@unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_handles(self): conn = _billiard.Connection(44977608) self.assertRaises(IOError, conn.poll) self.assertRaises(IOError, _billiard.Connection, -1) def get_attributes(Source, names): d = {} for name in names: obj = getattr(Source, name) if type(obj) == type(get_attributes): obj = staticmethod(obj) d[name] = obj return d def create_test_cases(Mixin, type): result = {} glob = globals() Type = type.capitalize() for name in glob.keys(): if name.startswith('_Test'): base = glob[name] if type in base.ALLOWED_TYPES: newname = 'With' + Type + name[1:] class Temp(base, unittest.TestCase, Mixin): pass result[newname] = Temp Temp.__name__ = newname Temp.__module__ = Mixin.__module__ return result class ProcessesMixin(object): TYPE = 'processes' Process = billiard.Process locals().update(get_attributes(billiard, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'RawValue', 'RawArray', 'current_process', 'active_children', 'Pipe', 'connection', 'JoinableQueue' ))) testcases_processes = create_test_cases(ProcessesMixin, type='processes') globals().update(testcases_processes) class ManagerMixin(object): TYPE = 'manager' Process = billiard.Process manager = object.__new__(billiard.managers.SyncManager) locals().update(get_attributes(manager, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'list', 'dict', 'Namespace', 'JoinableQueue' ))) testcases_manager = create_test_cases(ManagerMixin, type='manager') globals().update(testcases_manager) class ThreadsMixin(object): TYPE = 'threads' Process = billiard.dummy.Process locals().update(get_attributes(billiard.dummy, ( 'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Value', 'Array', 'current_process', 'active_children', 'Pipe', 'connection', 'dict', 'list', 'Namespace', 'JoinableQueue' ))) testcases_threads = create_test_cases(ThreadsMixin, type='threads') globals().update(testcases_threads) class OtherTest(unittest.TestCase): # TODO: add more tests for deliver/answer challenge. 
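    # Both tests below feed the auth handshake a fake connection whose
    # recv_bytes() returns b'something bogus'.  deliver_challenge() sends a
    # CHALLENGE message and expects back a digest of it keyed with the
    # shared authkey; answer_challenge() plays the other side and expects
    # the usual welcome reply.  In either direction a bogus peer must
    # surface as billiard.AuthenticationError rather than being accepted.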
def test_deliver_challenge_auth_failure(self): class _FakeConnection(object): def recv_bytes(self, size): return bytes('something bogus') def send_bytes(self, data): pass self.assertRaises(billiard.AuthenticationError, billiard.connection.deliver_challenge, _FakeConnection(), bytes('abc')) def test_answer_challenge_auth_failure(self): class _FakeConnection(object): def __init__(self): self.count = 0 def recv_bytes(self, size): self.count += 1 if self.count == 1: return billiard.connection.CHALLENGE elif self.count == 2: return bytes('something bogus') return bytes('') def send_bytes(self, data): pass self.assertRaises(billiard.AuthenticationError, billiard.connection.answer_challenge, _FakeConnection(), bytes('abc')) def initializer(ns): ns.test += 1 class TestInitializers(unittest.TestCase): """Test Manager.start()/Pool.__init__() initializer feature - see issue 5585 """ def setUp(self): self.mgr = billiard.Manager() self.ns = self.mgr.Namespace() self.ns.test = 0 def tearDown(self): self.mgr.shutdown() def test_manager_initializer(self): m = billiard.managers.SyncManager() self.assertRaises(TypeError, m.start, 1) m.start(initializer, (self.ns,)) self.assertEqual(self.ns.test, 1) m.shutdown() def test_pool_initializer(self): self.assertRaises(TypeError, billiard.Pool, initializer=1) p = billiard.Pool(1, initializer, (self.ns,)) p.close() p.join() self.assertEqual(self.ns.test, 1) def _ThisSubProcess(q): try: q.get(block=False) except Queue.Empty: pass def _TestProcess(q): """Issue 5155, 5313, 5331: Test process in processes Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior """ queue = billiard.Queue() subProc = billiard.Process(target=_ThisSubProcess, args=(queue,)) subProc.start() subProc.join() def _afunc(x): return x * x def pool_in_process(): pool = billiard.Pool(processes=4) pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): queue = billiard.Queue() proc = billiard.Process(target=_TestProcess, args=(queue,)) proc.start() proc.join() def test_pool_in_process(self): p = billiard.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = StringIO() flike = _file_like(sio) flike.write('foo') proc = billiard.Process(target=lambda: flike.flush()) self.assertTrue(proc) flike.flush() assert sio.getvalue() == 'foo' testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, TestStdinBadfiledescriptor] def test_main(run=None): if sys.platform.startswith("linux"): try: billiard.RLock() except OSError: raise SkipTest("OSError raises on RLock creation, see issue 3111!") if run is None: from test.test_support import run_unittest as run util.get_temp_dir() # creates temp directory for use by all processes billiard.get_logger().setLevel(LOG_LEVEL) ProcessesMixin.pool = billiard.Pool(4) ThreadsMixin.pool = billiard.dummy.Pool(4) ManagerMixin.manager.__init__() ManagerMixin.manager.start() ManagerMixin.pool = ManagerMixin.manager.Pool(4) testcases = ( sorted(testcases_processes.values(), key=lambda tc: tc.__name__) + sorted(testcases_threads.values(), key=lambda tc: 
tc.__name__) + sorted(testcases_manager.values(), key=lambda tc: tc.__name__) + testcases_other ) loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases) # (ncoghlan): Whether or not sys.exc_clear is executed by the threading # module during these tests is at least platform dependent and possibly # non-deterministic on any given platform. So we don't mind if the listed # warnings aren't actually raised. with test_support.check_py3k_warnings( (".+__(get|set)slice__ has been removed", DeprecationWarning), (r"sys.exc_clear\(\) not supported", DeprecationWarning), quiet=True): run(suite) ThreadsMixin.pool.terminate() ProcessesMixin.pool.terminate() ManagerMixin.pool.terminate() ManagerMixin.manager.shutdown() del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool def main(): test_main(unittest.TextTestRunner(verbosity=2).run) if __name__ == '__main__': main() billiard-3.5.0.3/Modules/0000755000175000017500000000000013132746522015005 5ustar omeromer00000000000000billiard-3.5.0.3/Modules/_billiard/0000755000175000017500000000000013132746522016726 5ustar omeromer00000000000000billiard-3.5.0.3/Modules/_billiard/multiprocessing.c0000644000175000017500000002373113132743245022326 0ustar omeromer00000000000000/* * Extension module used by multiprocessing package * * multiprocessing.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. */ #include "multiprocessing.h" #ifdef SCM_RIGHTS #define HAVE_FD_TRANSFER 1 #else #define HAVE_FD_TRANSFER 0 #endif /* * Function which raises exceptions based on error codes */ PyObject * Billiard_SetError(PyObject *Type, int num) { switch (num) { case MP_SUCCESS: break; #ifdef MS_WINDOWS case MP_STANDARD_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, 0); break; case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); break; #else /* !MS_WINDOWS */ case MP_STANDARD_ERROR: case MP_SOCKET_ERROR: if (Type == NULL) Type = PyExc_OSError; PyErr_SetFromErrno(Type); break; #endif /* !MS_WINDOWS */ case MP_MEMORY_ERROR: PyErr_NoMemory(); break; case MP_END_OF_FILE: PyErr_SetNone(PyExc_EOFError); break; case MP_EARLY_END_OF_FILE: PyErr_SetString(PyExc_IOError, "got end of file during message"); break; case MP_BAD_MESSAGE_LENGTH: PyErr_SetString(PyExc_IOError, "bad message length"); break; case MP_EXCEPTION_HAS_BEEN_SET: break; default: PyErr_Format(PyExc_RuntimeError, "unknown error number %d", num); } return NULL; } /* * Windows only */ #ifdef MS_WINDOWS /* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */ HANDLE sigint_event = NULL; static BOOL WINAPI ProcessingCtrlHandler(DWORD dwCtrlType) { SetEvent(sigint_event); return FALSE; } static PyObject * Billiard_closesocket(PyObject *self, PyObject *args) { HANDLE handle; int ret; if (!PyArg_ParseTuple(args, F_HANDLE ":closesocket" , &handle)) return NULL; Py_BEGIN_ALLOW_THREADS ret = closesocket((SOCKET) handle); Py_END_ALLOW_THREADS if (ret) return PyErr_SetExcFromWindowsErr(PyExc_IOError, WSAGetLastError()); Py_RETURN_NONE; } static PyObject * Billiard_recv(PyObject *self, PyObject *args) { HANDLE handle; int size, nread; PyObject *buf; if (!PyArg_ParseTuple(args, F_HANDLE "i:recv" , &handle, &size)) return NULL; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); 
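        /* The blocking recv() above runs with the GIL released.  After
           reacquiring it (below), a negative result is turned into an
           IOError carrying WSAGetLastError(), and on success the bytes
           object is shrunk to the number of bytes actually received. */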
Py_END_ALLOW_THREADS if (nread < 0) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_IOError, WSAGetLastError()); } _PyBytes_Resize(&buf, nread); return buf; } static PyObject * Billiard_send(PyObject *self, PyObject *args) { HANDLE handle; Py_buffer buf; int ret, length; if (!PyArg_ParseTuple(args, F_HANDLE "y*:send" , &handle, &buf)) return NULL; length = (int)Py_MIN(buf.len, INT_MAX); Py_BEGIN_ALLOW_THREADS ret = send((SOCKET) handle, buf.buf, length, 0); Py_END_ALLOW_THREADS PyBuffer_Release(&buf); if (ret < 0) return PyErr_SetExcFromWindowsErr(PyExc_IOError, WSAGetLastError()); return PyLong_FromLong(ret); } /* * Unix only */ #else /* !MS_WINDOWS */ #if HAVE_FD_TRANSFER /* Functions for transferring file descriptors between processes. Reimplements some of the functionality of the fdcred module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */ static PyObject * Billiard_multiprocessing_sendfd(PyObject *self, PyObject *args) { int conn, fd, res; char dummy_char; char buf[CMSG_SPACE(sizeof(int))]; struct msghdr msg = {0}; struct iovec dummy_iov; struct cmsghdr *cmsg; if (!PyArg_ParseTuple(args, "ii", &conn, &fd)) return NULL; dummy_iov.iov_base = &dummy_char; dummy_iov.iov_len = 1; msg.msg_control = buf; msg.msg_controllen = sizeof(buf); msg.msg_iov = &dummy_iov; msg.msg_iovlen = 1; cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(int)); msg.msg_controllen = cmsg->cmsg_len; *(int*)CMSG_DATA(cmsg) = fd; Py_BEGIN_ALLOW_THREADS res = sendmsg(conn, &msg, 0); Py_END_ALLOW_THREADS if (res < 0) return PyErr_SetFromErrno(PyExc_OSError); Py_RETURN_NONE; } static PyObject * Billiard_multiprocessing_recvfd(PyObject *self, PyObject *args) { int conn, fd, res; char dummy_char; char buf[CMSG_SPACE(sizeof(int))]; struct msghdr msg = {0}; struct iovec dummy_iov; struct cmsghdr *cmsg; if (!PyArg_ParseTuple(args, "i", &conn)) return NULL; dummy_iov.iov_base = &dummy_char; dummy_iov.iov_len = 1; msg.msg_control = buf; msg.msg_controllen = sizeof(buf); msg.msg_iov = &dummy_iov; msg.msg_iovlen = 1; cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(int)); msg.msg_controllen = cmsg->cmsg_len; Py_BEGIN_ALLOW_THREADS res = recvmsg(conn, &msg, 0); Py_END_ALLOW_THREADS if (res < 0) return PyErr_SetFromErrno(PyExc_OSError); fd = *(int*)CMSG_DATA(cmsg); return Py_BuildValue("i", fd); } #endif /* HAVE_FD_TRANSFER */ #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject* Billiard_multiprocessing_address_of_buffer(PyObject *self, PyObject *obj) { void *buffer; Py_ssize_t buffer_len; if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0) return NULL; return Py_BuildValue("N" F_PY_SSIZE_T, PyLong_FromVoidPtr(buffer), buffer_len); } #if !defined(MS_WINDOWS) static PyObject * Billiard_read(PyObject *self, PyObject *args) { int fd; Py_buffer view; Py_ssize_t buflen, recvlen = 0; char *buf = NULL; Py_ssize_t n = 0; if (!PyArg_ParseTuple(args, "iw*|n", &fd, &view, &recvlen)) return NULL; buflen = view.len; buf = view.buf; if (recvlen < 0) { PyBuffer_Release(&view); PyErr_SetString(PyExc_ValueError, "negative len for read"); return NULL; } if (recvlen == 0) { recvlen = buflen; } if (buflen < recvlen) { PyBuffer_Release(&view); PyErr_SetString(PyExc_ValueError, "Buffer too small for requested bytes"); return NULL; } if (buflen < 0 || buflen == 0) { errno = EINVAL; goto bail; } // Requires Python 2.7 //if (!_PyVerify_fd(fd)) goto bail; Py_BEGIN_ALLOW_THREADS n = 
read(fd, buf, recvlen); Py_END_ALLOW_THREADS if (n < 0) goto bail; PyBuffer_Release(&view); return PyInt_FromSsize_t(n); bail: PyBuffer_Release(&view); return PyErr_SetFromErrno(PyExc_OSError); } # endif /* !MS_WINDOWS */ /* * Function table */ static PyMethodDef Billiard_module_methods[] = { {"address_of_buffer", Billiard_multiprocessing_address_of_buffer, METH_O, "address_of_buffer(obj) -> int\n\n" "Return address of obj assuming obj supports buffer inteface"}, #if HAVE_FD_TRANSFER {"sendfd", Billiard_multiprocessing_sendfd, METH_VARARGS, "sendfd(sockfd, fd) -> None\n\n" "Send file descriptor given by fd over the unix domain socket\n" "whose file descriptor is sockfd"}, {"recvfd", Billiard_multiprocessing_recvfd, METH_VARARGS, "recvfd(sockfd) -> fd\n\n" "Receive a file descriptor over a unix domain socket\n" "whose file descriptor is sockfd"}, #endif #if !defined(MS_WINDOWS) {"read", Billiard_read, METH_VARARGS, "read(fd, buffer) -> bytes\n\n" "Read from file descriptor into buffer."}, #endif #ifdef MS_WINDOWS {"closesocket", Billiard_closesocket, METH_VARARGS, ""}, {"recv", Billiard_recv, METH_VARARGS, ""}, {"send", Billiard_send, METH_VARARGS, ""}, #endif #ifndef POSIX_SEMAPHORES_NOT_ENABLED {"sem_unlink", Billiard_semlock_unlink, METH_VARARGS, ""}, #endif {NULL} }; /* * Initialize */ PyMODINIT_FUNC init_billiard(void) { PyObject *module, *temp, *value; /* Initialize module */ module = Py_InitModule("_billiard", Billiard_module_methods); if (!module) return; #if defined(MS_WINDOWS) || \ (defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED)) /* Add SemLock type to module */ if (PyType_Ready(&BilliardSemLockType) < 0) return; Py_INCREF(&BilliardSemLockType); PyDict_SetItemString(BilliardSemLockType.tp_dict, "SEM_VALUE_MAX", Py_BuildValue("i", SEM_VALUE_MAX)); PyModule_AddObject(module, "SemLock", (PyObject*)&BilliardSemLockType); #endif #ifdef MS_WINDOWS /* Initialize the event handle used to signal Ctrl-C */ sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL); if (!sigint_event) { PyErr_SetFromWindowsErr(0); return; } if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) { PyErr_SetFromWindowsErr(0); return; } #endif /* Add configuration macros */ temp = PyDict_New(); if (!temp) return; #define ADD_FLAG(name) \ value = Py_BuildValue("i", name); \ if (value == NULL) { Py_DECREF(temp); return; } \ if (PyDict_SetItemString(temp, #name, value) < 0) { \ Py_DECREF(temp); Py_DECREF(value); return; } \ Py_DECREF(value) #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) ADD_FLAG(HAVE_SEM_OPEN); #endif #ifdef HAVE_SEM_TIMEDWAIT ADD_FLAG(HAVE_SEM_TIMEDWAIT); #endif #ifdef HAVE_FD_TRANSFER ADD_FLAG(HAVE_FD_TRANSFER); #endif #ifdef HAVE_BROKEN_SEM_GETVALUE ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); #endif #ifdef HAVE_BROKEN_SEM_UNLINK ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); #endif if (PyModule_AddObject(module, "flags", temp) < 0) return; } billiard-3.5.0.3/Modules/_billiard/semaphore.c0000644000175000017500000004546013132743245021065 0ustar omeromer00000000000000/* * A type which wraps a semaphore * * semaphore.c * * Copyright (c) 2006-2008, R Oudkerk * Licensed to PSF under a Contributor Agreement. 
*/ #include "multiprocessing.h" enum { RECURSIVE_MUTEX, SEMAPHORE }; typedef struct { PyObject_HEAD SEM_HANDLE handle; long last_tid; int count; int maxvalue; int kind; char *name; } BilliardSemLockObject; #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) #ifdef MS_WINDOWS /* * Windows definitions */ #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) #define SEM_GET_LAST_ERROR() GetLastError() #define SEM_CREATE(name, val, max) CreateSemaphore(NULL, val, max, NULL) #define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1) #define SEM_GETVALUE(sem, pval) _Billiard_GetSemaphoreValue(sem, pval) #define SEM_UNLINK(name) 0 static int _Billiard_GetSemaphoreValue(HANDLE handle, long *value) { long previous; switch (WaitForSingleObjectEx(handle, 0, FALSE)) { case WAIT_OBJECT_0: if (!ReleaseSemaphore(handle, 1, &previous)) return MP_STANDARD_ERROR; *value = previous + 1; return 0; case WAIT_TIMEOUT: *value = 0; return 0; default: return MP_STANDARD_ERROR; } } static PyObject * Billiard_semlock_acquire(BilliardSemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1; double timeout; PyObject *timeout_obj = Py_None; DWORD res, full_msecs, msecs, start, ticks; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; /* calculate timeout */ if (!blocking) { full_msecs = 0; } else if (timeout_obj == Py_None) { full_msecs = INFINITE; } else { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; timeout *= 1000.0; /* convert to millisecs */ if (timeout < 0.0) { timeout = 0.0; } else if (timeout >= 0.5 * INFINITE) { /* 25 days */ PyErr_SetString(PyExc_OverflowError, "timeout is too large"); return NULL; } full_msecs = (DWORD)(timeout + 0.5); } /* check whether we already own the lock */ if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } /* check whether we can acquire without blocking */ if (WaitForSingleObjectEx(self->handle, 0, FALSE) == WAIT_OBJECT_0) { self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; } msecs = full_msecs; start = GetTickCount(); for ( ; ; ) { HANDLE handles[2] = {self->handle, sigint_event}; /* do the wait */ Py_BEGIN_ALLOW_THREADS ResetEvent(sigint_event); res = WaitForMultipleObjectsEx(2, handles, FALSE, msecs, FALSE); Py_END_ALLOW_THREADS /* handle result */ if (res != WAIT_OBJECT_0 + 1) break; /* got SIGINT so give signal handler a chance to run */ Sleep(1); /* if this is main thread let KeyboardInterrupt be raised */ if (PyErr_CheckSignals()) return NULL; /* recalculate timeout */ if (msecs != INFINITE) { ticks = GetTickCount(); if ((DWORD)(ticks - start) >= full_msecs) Py_RETURN_FALSE; msecs = full_msecs - (ticks - start); } } /* handle result */ switch (res) { case WAIT_TIMEOUT: Py_RETURN_FALSE; case WAIT_OBJECT_0: self->last_tid = GetCurrentThreadId(); ++self->count; Py_RETURN_TRUE; case WAIT_FAILED: return PyErr_SetFromWindowsErr(0); default: PyErr_Format(PyExc_RuntimeError, "WaitForSingleObjectEx() or " "WaitForMultipleObjects() gave unrecognized " "value %d", res); return NULL; } } static PyObject * Billiard_semlock_release(BilliardSemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } if 
(!ReleaseSemaphore(self->handle, 1, NULL)) { if (GetLastError() == ERROR_TOO_MANY_POSTS) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } else { return PyErr_SetFromWindowsErr(0); } } --self->count; Py_RETURN_NONE; } #else /* !MS_WINDOWS */ /* * Unix definitions */ #define SEM_CLEAR_ERROR() #define SEM_GET_LAST_ERROR() 0 #define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val) #define SEM_CLOSE(sem) sem_close(sem) #define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval) #define SEM_UNLINK(name) sem_unlink(name) /* macOS 10.4 defines SEM_FAILED as -1 instead (sem_t *)-1; this gives compiler warnings, and (potentially) undefined behavior. */ #ifdef __APPLE__ # undef SEM_FAILED # define SEM_FAILED ((sem_t *)-1) #endif #ifndef HAVE_SEM_UNLINK # define sem_unlink(name) 0 #endif //#ifndef HAVE_SEM_TIMEDWAIT # define sem_timedwait(sem,deadline) Billiard_sem_timedwait_save(sem,deadline,_save) int Billiard_sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) { int res; unsigned long delay, difference; struct timeval now, tvdeadline, tvdelay; errno = 0; tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; for (delay = 0 ; ; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; else if (errno != EAGAIN) return MP_STANDARD_ERROR; /* get current time */ if (gettimeofday(&now, NULL) < 0) return MP_STANDARD_ERROR; /* check for timeout */ if (tvdeadline.tv_sec < now.tv_sec || (tvdeadline.tv_sec == now.tv_sec && tvdeadline.tv_usec <= now.tv_usec)) { errno = ETIMEDOUT; return MP_STANDARD_ERROR; } /* calculate how much time is left */ difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 + (tvdeadline.tv_usec - now.tv_usec); /* check delay not too long -- maximum is 20 msecs */ if (delay > 20000) delay = 20000; if (delay > difference) delay = difference; /* sleep */ tvdelay.tv_sec = delay / 1000000; tvdelay.tv_usec = delay % 1000000; if (select(0, NULL, NULL, NULL, &tvdelay) < 0) return MP_STANDARD_ERROR; /* check for signals */ Py_BLOCK_THREADS res = PyErr_CheckSignals(); Py_UNBLOCK_THREADS if (res) { errno = EINTR; return MP_EXCEPTION_HAS_BEEN_SET; } } } //#endif /* !HAVE_SEM_TIMEDWAIT */ static PyObject * Billiard_semlock_acquire(BilliardSemLockObject *self, PyObject *args, PyObject *kwds) { int blocking = 1, res; double timeout; PyObject *timeout_obj = Py_None; struct timespec deadline = {0}; struct timeval now; long sec, nsec; static char *kwlist[] = {"block", "timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist, &blocking, &timeout_obj)) return NULL; if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } if (timeout_obj != Py_None) { timeout = PyFloat_AsDouble(timeout_obj); if (PyErr_Occurred()) return NULL; if (timeout < 0.0) timeout = 0.0; if (gettimeofday(&now, NULL) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } sec = (long) timeout; nsec = (long) (1e9 * (timeout - sec) + 0.5); deadline.tv_sec = now.tv_sec + sec; deadline.tv_nsec = now.tv_usec * 1000 + nsec; deadline.tv_sec += (deadline.tv_nsec / 1000000000); deadline.tv_nsec %= 1000000000; } do { Py_BEGIN_ALLOW_THREADS if (blocking && timeout_obj == Py_None) res = sem_wait(self->handle); else if (!blocking) res = sem_trywait(self->handle); else res = sem_timedwait(self->handle, &deadline); Py_END_ALLOW_THREADS if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); if (res < 0) { if (errno == 
EAGAIN || errno == ETIMEDOUT) Py_RETURN_FALSE; else if (errno == EINTR) return NULL; else return PyErr_SetFromErrno(PyExc_OSError); } ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } static PyObject * Billiard_semlock_release(BilliardSemLockObject *self, PyObject *args) { if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " "release recursive lock not owned " "by thread"); return NULL; } if (self->count > 1) { --self->count; Py_RETURN_NONE; } assert(self->count == 1); } else { #ifdef HAVE_BROKEN_SEM_GETVALUE /* We will only check properly the maxvalue == 1 case */ if (self->maxvalue == 1) { /* make sure that already locked */ if (sem_trywait(self->handle) < 0) { if (errno != EAGAIN) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } /* it is already locked as expected */ } else { /* it was not locked so undo wait and raise */ if (sem_post(self->handle) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } PyErr_SetString(PyExc_ValueError, "semaphore " "or lock released too many " "times"); return NULL; } } #else int sval; /* This check is not an absolute guarantee that the semaphore does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; } #endif } if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); --self->count; Py_RETURN_NONE; } #endif /* !MS_WINDOWS */ /* * All platforms */ static PyObject * Billiard_newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, char *name) { BilliardSemLockObject *self; self = PyObject_New(BilliardSemLockObject, type); if (!self) return NULL; self->handle = handle; self->kind = kind; self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; return (PyObject*)self; } static PyObject * Billiard_semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { SEM_HANDLE handle = SEM_FAILED; int kind, maxvalue, value, unlink; PyObject *result; char *name, *name_copy = NULL; static char *kwlist[] = {"kind", "value", "maxvalue", "name", "unlink", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiisi", kwlist, &kind, &value, &maxvalue, &name, &unlink)) return NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); return NULL; } if (!unlink) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) goto failure; strcpy(name_copy, name); } SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; if (unlink && SEM_UNLINK(name) < 0) goto failure; result = Billiard_newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; return result; failure: if (handle != SEM_FAILED) SEM_CLOSE(handle); PyMem_Free(name_copy); Billiard_SetError(NULL, MP_STANDARD_ERROR); return NULL; } static PyObject * Billiard_semlock_rebuild(PyTypeObject *type, PyObject *args) { SEM_HANDLE handle; int kind, maxvalue; char *name, *name_copy = NULL; if (!PyArg_ParseTuple(args, F_SEM_HANDLE "iiz", &handle, &kind, &maxvalue, &name)) return NULL; if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 1); if (name_copy == NULL) return PyErr_NoMemory(); strcpy(name_copy, name); } #ifndef MS_WINDOWS if 
(name != NULL) { handle = sem_open(name, 0); if (handle == SEM_FAILED) { PyMem_Free(name_copy); return PyErr_SetFromErrno(PyExc_OSError); } } #endif return Billiard_newsemlockobject(type, handle, kind, maxvalue, name_copy); } static void Billiard_semlock_dealloc(BilliardSemLockObject* self) { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); PyMem_Free(self->name); PyObject_Del(self); } static PyObject * Billiard_semlock_count(BilliardSemLockObject *self) { return PyInt_FromLong((long)self->count); } static PyObject * Billiard_semlock_ismine(BilliardSemLockObject *self) { /* only makes sense for a lock */ return PyBool_FromLong(ISMINE(self)); } static PyObject * Billiard_semlock_getvalue(BilliardSemLockObject *self) { #ifdef HAVE_BROKEN_SEM_GETVALUE PyErr_SetNone(PyExc_NotImplementedError); return NULL; #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return Billiard_SetError(NULL, MP_STANDARD_ERROR); /* some posix implementations use negative numbers to indicate the number of waiting threads */ if (sval < 0) sval = 0; return PyInt_FromLong((long)sval); #endif } static PyObject * Billiard_semlock_iszero(BilliardSemLockObject *self) { #ifdef HAVE_BROKEN_SEM_GETVALUE if (sem_trywait(self->handle) < 0) { if (errno == EAGAIN) Py_RETURN_TRUE; return Billiard_SetError(NULL, MP_STANDARD_ERROR); } else { if (sem_post(self->handle) < 0) return Billiard_SetError(NULL, MP_STANDARD_ERROR); Py_RETURN_FALSE; } #else int sval; if (SEM_GETVALUE(self->handle, &sval) < 0) return Billiard_SetError(NULL, MP_STANDARD_ERROR); return PyBool_FromLong((long)sval == 0); #endif } static PyObject * Billiard_semlock_afterfork(BilliardSemLockObject *self) { self->count = 0; Py_RETURN_NONE; } PyObject * Billiard_semlock_unlink(PyObject *ignore, PyObject *args) { char *name; if (!PyArg_ParseTuple(args, "s", &name)) return NULL; if (SEM_UNLINK(name) < 0) { Billiard_SetError(NULL, MP_STANDARD_ERROR); return NULL; } Py_RETURN_NONE; } /* * Semaphore methods */ static PyMethodDef Billiard_semlock_methods[] = { {"acquire", (PyCFunction)Billiard_semlock_acquire, METH_VARARGS | METH_KEYWORDS, "acquire the semaphore/lock"}, {"release", (PyCFunction)Billiard_semlock_release, METH_NOARGS, "release the semaphore/lock"}, {"__enter__", (PyCFunction)Billiard_semlock_acquire, METH_VARARGS | METH_KEYWORDS, "enter the semaphore/lock"}, {"__exit__", (PyCFunction)Billiard_semlock_release, METH_VARARGS, "exit the semaphore/lock"}, {"_count", (PyCFunction)Billiard_semlock_count, METH_NOARGS, "num of `acquire()`s minus num of `release()`s for this process"}, {"_is_mine", (PyCFunction)Billiard_semlock_ismine, METH_NOARGS, "whether the lock is owned by this thread"}, {"_get_value", (PyCFunction)Billiard_semlock_getvalue, METH_NOARGS, "get the value of the semaphore"}, {"_is_zero", (PyCFunction)Billiard_semlock_iszero, METH_NOARGS, "returns whether semaphore has value zero"}, {"_rebuild", (PyCFunction)Billiard_semlock_rebuild, METH_VARARGS | METH_CLASS, ""}, {"_after_fork", (PyCFunction)Billiard_semlock_afterfork, METH_NOARGS, "rezero the net acquisition count after fork()"}, {"sem_unlink", (PyCFunction)Billiard_semlock_unlink, METH_VARARGS | METH_STATIC, "unlink the named semaphore using sem_unlink()"}, {NULL} }; /* * Member table */ static PyMemberDef Billiard_semlock_members[] = { {"handle", T_SEM_HANDLE, offsetof(BilliardSemLockObject, handle), READONLY, ""}, {"kind", T_INT, offsetof(BilliardSemLockObject, kind), READONLY, ""}, {"maxvalue", T_INT, offsetof(BilliardSemLockObject, maxvalue), READONLY, ""}, {"name", T_STRING, 
offsetof(BilliardSemLockObject, name), READONLY, ""}, {NULL} }; /* * Semaphore type */ PyTypeObject BilliardSemLockType = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_billiard.SemLock", /* tp_basicsize */ sizeof(BilliardSemLockObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor)Billiard_semlock_dealloc, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_doc */ "Semaphore/Mutex type", /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ Billiard_semlock_methods, /* tp_members */ Billiard_semlock_members, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ Billiard_semlock_new, }; billiard-3.5.0.3/Modules/_billiard/win32_functions.c0000644000175000017500000006473513132743245022142 0ustar omeromer00000000000000/* * Win32 functions used by multiprocessing package * * win32_functions.c * * Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt */ #include "multiprocessing.h" #if defined(MS_WIN32) && !defined(MS_WIN64) #define HANDLE_TO_PYNUM(handle) \ PyLong_FromUnsignedLong((unsigned long) handle) #define PYNUM_TO_HANDLE(obj) ((HANDLE)PyLong_AsUnsignedLong(obj)) #define F_POINTER "k" #define T_POINTER T_ULONG #else #define HANDLE_TO_PYNUM(handle) \ PyLong_FromUnsignedLongLong((unsigned long long) handle) #define PYNUM_TO_HANDLE(obj) ((HANDLE)PyLong_AsUnsignedLongLong(obj)) #define F_POINTER "K" #define T_POINTER T_ULONGLONG #endif #define F_HANDLE F_POINTER #define F_DWORD "k" #define F_BOOL "i" #define F_UINT "I" #define T_HANDLE T_POINTER #define DWORD_MAX 4294967295U #define Py_MIN(x, y) (((x) > (y)) ? (y) : (x)) /* Grab CancelIoEx dynamically from kernel32 */ static int has_CancelIoEx = -1; static BOOL (CALLBACK *Py_CancelIoEx)(HANDLE, LPOVERLAPPED); static int check_CancelIoEx() { if (has_CancelIoEx == -1) { HINSTANCE hKernel32 = GetModuleHandle("KERNEL32"); * (FARPROC *) &Py_CancelIoEx = GetProcAddress(hKernel32, "CancelIoEx"); has_CancelIoEx = (Py_CancelIoEx != NULL); } return has_CancelIoEx; } /* * A Python object wrapping an OVERLAPPED structure and other useful data * for overlapped I/O */ typedef struct { PyObject_HEAD OVERLAPPED overlapped; /* For convenience, we store the file handle too */ HANDLE handle; /* Whether there's I/O in flight */ int pending; /* Whether I/O completed successfully */ int completed; /* Buffer used for reading (optional) */ PyObject *read_buffer; /* Buffer used for writing (optional) */ Py_buffer write_buffer; } OverlappedObject; static void overlapped_dealloc(OverlappedObject *self) { DWORD bytes; int err = GetLastError(); if (self->pending) { if (check_CancelIoEx() && Py_CancelIoEx(self->handle, &self->overlapped) && GetOverlappedResult(self->handle, &self->overlapped, &bytes, TRUE)) { /* The operation is no longer pending -- nothing to do. */ } else { /* The operation is still pending, but the process is probably about to exit, so we need not worry too much about memory leaks. Leaking self prevents a potential crash. This can happen when a daemon thread is cleaned up at exit -- see #19565. We only expect to get here on Windows XP. 
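           Leaking self here trades a small one-off memory leak for safety:
           while the I/O is still pending the kernel may yet write its
           completion status into this OVERLAPPED block, so freeing the
           memory could corrupt the heap.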
*/ CloseHandle(self->overlapped.hEvent); SetLastError(err); return; } } CloseHandle(self->overlapped.hEvent); SetLastError(err); if (self->write_buffer.obj) PyBuffer_Release(&self->write_buffer); Py_CLEAR(self->read_buffer); PyObject_Del(self); } static PyObject * overlapped_GetOverlappedResult(OverlappedObject *self, PyObject *waitobj) { int wait; BOOL res; DWORD transferred = 0; DWORD err; wait = PyObject_IsTrue(waitobj); if (wait < 0) return NULL; Py_BEGIN_ALLOW_THREADS res = GetOverlappedResult(self->handle, &self->overlapped, &transferred, wait != 0); Py_END_ALLOW_THREADS err = res ? ERROR_SUCCESS : GetLastError(); switch (err) { case ERROR_SUCCESS: case ERROR_MORE_DATA: case ERROR_OPERATION_ABORTED: self->completed = 1; self->pending = 0; break; case ERROR_IO_INCOMPLETE: break; default: self->pending = 0; return PyErr_SetExcFromWindowsErr(PyExc_IOError, err); } if (self->completed && self->read_buffer != NULL) { assert(PyBytes_CheckExact(self->read_buffer)); if (transferred != PyBytes_GET_SIZE(self->read_buffer) && _PyBytes_Resize(&self->read_buffer, transferred)) return NULL; } return Py_BuildValue("II", (unsigned) transferred, (unsigned) err); } static PyObject * overlapped_getbuffer(OverlappedObject *self) { PyObject *res; if (!self->completed) { PyErr_SetString(PyExc_ValueError, "can't get read buffer before GetOverlappedResult() " "signals the operation completed"); return NULL; } res = self->read_buffer ? self->read_buffer : Py_None; Py_INCREF(res); return res; } static PyObject * overlapped_cancel(OverlappedObject *self) { BOOL res = TRUE; if (self->pending) { Py_BEGIN_ALLOW_THREADS if (check_CancelIoEx()) res = Py_CancelIoEx(self->handle, &self->overlapped); else res = CancelIo(self->handle); Py_END_ALLOW_THREADS } /* CancelIoEx returns ERROR_NOT_FOUND if the I/O completed in-between */ if (!res && GetLastError() != ERROR_NOT_FOUND) return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); self->pending = 0; Py_RETURN_NONE; } static PyMethodDef overlapped_methods[] = { {"GetOverlappedResult", (PyCFunction) overlapped_GetOverlappedResult, METH_O, NULL}, {"getbuffer", (PyCFunction) overlapped_getbuffer, METH_NOARGS, NULL}, {"cancel", (PyCFunction) overlapped_cancel, METH_NOARGS, NULL}, {NULL} }; static PyMemberDef overlapped_members[] = { {"event", T_HANDLE, offsetof(OverlappedObject, overlapped) + offsetof(OVERLAPPED, hEvent), READONLY, "overlapped event handle"}, {NULL} }; PyTypeObject OverlappedType = { PyVarObject_HEAD_INIT(NULL, 0) /* tp_name */ "_winapi.Overlapped", /* tp_basicsize */ sizeof(OverlappedObject), /* tp_itemsize */ 0, /* tp_dealloc */ (destructor) overlapped_dealloc, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ Py_TPFLAGS_DEFAULT, /* tp_doc */ "OVERLAPPED structure wrapper", /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ overlapped_methods, /* tp_members */ overlapped_members, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ 0, }; static OverlappedObject * new_overlapped(HANDLE handle) { OverlappedObject *self; self = PyObject_New(OverlappedObject, &OverlappedType); if (!self) return NULL; self->handle = 
handle; self->read_buffer = NULL; self->pending = 0; self->completed = 0; memset(&self->overlapped, 0, sizeof(OVERLAPPED)); memset(&self->write_buffer, 0, sizeof(Py_buffer)); /* Manual reset, initially non-signaled */ self->overlapped.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL); return self; } static PyObject * win32_CloseHandle(PyObject *self, PyObject *args) { HANDLE hObject; BOOL success; if (!PyArg_ParseTuple(args, F_HANDLE, &hObject)) return NULL; Py_BEGIN_ALLOW_THREADS success = CloseHandle(hObject); Py_END_ALLOW_THREADS if (!success) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyObject * win32_ConnectNamedPipe(PyObject *self, PyObject *args, PyObject *kwds) { HANDLE hNamedPipe; int use_overlapped = 0; BOOL success; OverlappedObject *overlapped = NULL; static char *kwlist[] = {"handle", "overlapped", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "|" F_BOOL, kwlist, &hNamedPipe, &use_overlapped)) return NULL; if (use_overlapped) { overlapped = new_overlapped(hNamedPipe); if (!overlapped) return NULL; } Py_BEGIN_ALLOW_THREADS success = ConnectNamedPipe(hNamedPipe, overlapped ? &overlapped->overlapped : NULL); Py_END_ALLOW_THREADS if (overlapped) { int err = GetLastError(); /* Overlapped ConnectNamedPipe never returns a success code */ assert(success == 0); if (err == ERROR_IO_PENDING) overlapped->pending = 1; else if (err == ERROR_PIPE_CONNECTED) SetEvent(overlapped->overlapped.hEvent); else { Py_DECREF(overlapped); return PyErr_SetFromWindowsErr(err); } return (PyObject *) overlapped; } if (!success) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyObject * win32_CreateFile(PyObject *self, PyObject *args) { LPCTSTR lpFileName; DWORD dwDesiredAccess; DWORD dwShareMode; LPSECURITY_ATTRIBUTES lpSecurityAttributes; DWORD dwCreationDisposition; DWORD dwFlagsAndAttributes; HANDLE hTemplateFile; HANDLE handle; if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER F_DWORD F_DWORD F_HANDLE, &lpFileName, &dwDesiredAccess, &dwShareMode, &lpSecurityAttributes, &dwCreationDisposition, &dwFlagsAndAttributes, &hTemplateFile)) return NULL; Py_BEGIN_ALLOW_THREADS handle = CreateFile(lpFileName, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile); Py_END_ALLOW_THREADS if (handle == INVALID_HANDLE_VALUE) return PyErr_SetFromWindowsErr(0); return Py_BuildValue(F_HANDLE, handle); } static PyObject * win32_CreateNamedPipe(PyObject *self, PyObject *args) { LPCTSTR lpName; DWORD dwOpenMode; DWORD dwPipeMode; DWORD nMaxInstances; DWORD nOutBufferSize; DWORD nInBufferSize; DWORD nDefaultTimeOut; LPSECURITY_ATTRIBUTES lpSecurityAttributes; HANDLE handle; if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD F_DWORD F_DWORD F_DWORD F_POINTER, &lpName, &dwOpenMode, &dwPipeMode, &nMaxInstances, &nOutBufferSize, &nInBufferSize, &nDefaultTimeOut, &lpSecurityAttributes)) return NULL; Py_BEGIN_ALLOW_THREADS handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode, nMaxInstances, nOutBufferSize, nInBufferSize, nDefaultTimeOut, lpSecurityAttributes); Py_END_ALLOW_THREADS if (handle == INVALID_HANDLE_VALUE) return PyErr_SetFromWindowsErr(0); return Py_BuildValue(F_HANDLE, handle); } static PyObject * win32_ExitProcess(PyObject *self, PyObject *args) { UINT uExitCode; if (!PyArg_ParseTuple(args, "I", &uExitCode)) return NULL; #if defined(Py_DEBUG) SetErrorMode(SEM_FAILCRITICALERRORS|SEM_NOALIGNMENTFAULTEXCEPT|SEM_NOGPFAULTERRORBOX|SEM_NOOPENFILEERRORBOX); _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG); #endif 
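    /* ExitProcess() terminates the process and never returns; the
       "return NULL" below only keeps the compiler happy.  In debug builds
       the SetErrorMode()/_CrtSetReportMode() calls above stop assertion
       and fault dialogs from blocking the exit. */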
ExitProcess(uExitCode); return NULL; } static PyObject * win32_GetLastError(PyObject *self, PyObject *args) { return Py_BuildValue(F_DWORD, GetLastError()); } static PyObject * win32_OpenProcess(PyObject *self, PyObject *args) { DWORD dwDesiredAccess; BOOL bInheritHandle; DWORD dwProcessId; HANDLE handle; if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD, &dwDesiredAccess, &bInheritHandle, &dwProcessId)) return NULL; handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId); if (handle == NULL) return PyErr_SetFromWindowsErr(0); return Py_BuildValue(F_HANDLE, handle); } static PyObject * win32_SetNamedPipeHandleState(PyObject *self, PyObject *args) { HANDLE hNamedPipe; PyObject *oArgs[3]; DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL}; int i; if (!PyArg_ParseTuple(args, F_HANDLE "OOO", &hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2])) return NULL; PyErr_Clear(); for (i = 0 ; i < 3 ; i++) { if (oArgs[i] != Py_None) { dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]); if (PyErr_Occurred()) return NULL; pArgs[i] = &dwArgs[i]; } } if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2])) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyObject * win32_WaitNamedPipe(PyObject *self, PyObject *args) { LPCTSTR lpNamedPipeName; DWORD nTimeOut; BOOL success; if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut)) return NULL; Py_BEGIN_ALLOW_THREADS success = WaitNamedPipe(lpNamedPipeName, nTimeOut); Py_END_ALLOW_THREADS if (!success) return PyErr_SetFromWindowsErr(0); Py_RETURN_NONE; } static PyObject * win32_PeekNamedPipe(PyObject *self, PyObject *args) { HANDLE handle; int size = 0; PyObject *buf = NULL; DWORD nread, navail, nleft; BOOL ret; if (!PyArg_ParseTuple(args, F_HANDLE "|i:PeekNamedPipe" , &handle, &size)) return NULL; if (size < 0) { PyErr_SetString(PyExc_ValueError, "negative size"); return NULL; } if (size) { buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; Py_BEGIN_ALLOW_THREADS ret = PeekNamedPipe(handle, PyBytes_AS_STRING(buf), size, &nread, &navail, &nleft); Py_END_ALLOW_THREADS if (!ret) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); } if (_PyBytes_Resize(&buf, nread)) return NULL; return Py_BuildValue("Nii", buf, navail, nleft); } else { Py_BEGIN_ALLOW_THREADS ret = PeekNamedPipe(handle, NULL, 0, NULL, &navail, &nleft); Py_END_ALLOW_THREADS if (!ret) { return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); } return Py_BuildValue("ii", navail, nleft); } } static PyObject * win32_WriteFile(PyObject *self, PyObject *args, PyObject *kwds) { HANDLE handle; Py_buffer _buf, *buf; PyObject *bufobj; DWORD len, written; BOOL ret; int use_overlapped = 0; DWORD err; OverlappedObject *overlapped = NULL; static char *kwlist[] = {"handle", "buffer", "overlapped", NULL}; /* First get handle and use_overlapped to know which Py_buffer to use */ if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "O|i:WriteFile", kwlist, &handle, &bufobj, &use_overlapped)) return NULL; if (use_overlapped) { overlapped = new_overlapped(handle); if (!overlapped) return NULL; buf = &overlapped->write_buffer; } else buf = &_buf; if (!PyArg_Parse(bufobj, "y*", buf)) { Py_XDECREF(overlapped); return NULL; } Py_BEGIN_ALLOW_THREADS len = (DWORD)Py_MIN(buf->len, DWORD_MAX); ret = WriteFile(handle, buf->buf, len, &written, overlapped ? &overlapped->overlapped : NULL); Py_END_ALLOW_THREADS err = ret ? 
0 : GetLastError(); if (overlapped) { if (!ret) { if (err == ERROR_IO_PENDING) overlapped->pending = 1; else { Py_DECREF(overlapped); return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); } } return Py_BuildValue("NI", (PyObject *) overlapped, err); } PyBuffer_Release(buf); if (!ret) return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); return Py_BuildValue("II", written, err); } static PyObject * win32_ReadFile(PyObject *self, PyObject *args, PyObject *kwds) { HANDLE handle; int size; DWORD nread; PyObject *buf; BOOL ret; int use_overlapped = 0; DWORD err; OverlappedObject *overlapped = NULL; static char *kwlist[] = {"handle", "size", "overlapped", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "i|i:ReadFile", kwlist, &handle, &size, &use_overlapped)) return NULL; buf = PyBytes_FromStringAndSize(NULL, size); if (!buf) return NULL; if (use_overlapped) { overlapped = new_overlapped(handle); if (!overlapped) { Py_DECREF(buf); return NULL; } /* Steals reference to buf */ overlapped->read_buffer = buf; } Py_BEGIN_ALLOW_THREADS ret = ReadFile(handle, PyBytes_AS_STRING(buf), size, &nread, overlapped ? &overlapped->overlapped : NULL); Py_END_ALLOW_THREADS err = ret ? 0 : GetLastError(); if (overlapped) { if (!ret) { if (err == ERROR_IO_PENDING) overlapped->pending = 1; else if (err != ERROR_MORE_DATA) { Py_DECREF(overlapped); return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); } } return Py_BuildValue("NI", (PyObject *) overlapped, err); } if (!ret && err != ERROR_MORE_DATA) { Py_DECREF(buf); return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); } if (_PyBytes_Resize(&buf, nread)) return NULL; return Py_BuildValue("NI", buf, err); } static PyObject * win32_WaitForMultipleObjects(PyObject* self, PyObject* args) { DWORD result; PyObject *handle_seq; HANDLE handles[MAXIMUM_WAIT_OBJECTS]; HANDLE sigint_event = NULL; Py_ssize_t nhandles, i; BOOL wait_flag; DWORD milliseconds = INFINITE; if (!PyArg_ParseTuple(args, "O" F_BOOL "|" F_DWORD ":WaitForMultipleObjects", &handle_seq, &wait_flag, &milliseconds)) return NULL; if (!PySequence_Check(handle_seq)) { PyErr_Format(PyExc_TypeError, "sequence type expected, got '%s'", Py_TYPE(handle_seq)->tp_name); return NULL; } nhandles = PySequence_Length(handle_seq); if (nhandles == -1) return NULL; if (nhandles < 0 || nhandles >= MAXIMUM_WAIT_OBJECTS - 1) { PyErr_Format(PyExc_ValueError, "need at most %zd handles, got a sequence of length %zd", MAXIMUM_WAIT_OBJECTS - 1, nhandles); return NULL; } for (i = 0; i < nhandles; i++) { HANDLE h; PyObject *v = PySequence_GetItem(handle_seq, i); if (v == NULL) return NULL; if (!PyArg_Parse(v, F_HANDLE, &h)) { Py_DECREF(v); return NULL; } handles[i] = h; Py_DECREF(v); } /* If this is the main thread then make the wait interruptible by Ctrl-C unless we are waiting for *all* handles if (!wait_flag && _PyOS_IsMainThread()) { sigint_event = _PyOS_SigintEvent(); assert(sigint_event != NULL); handles[nhandles++] = sigint_event; }*/ Py_BEGIN_ALLOW_THREADS /*if (sigint_event != NULL) ResetEvent(sigint_event);*/ result = WaitForMultipleObjects((DWORD) nhandles, handles, wait_flag, milliseconds); Py_END_ALLOW_THREADS if (result == WAIT_FAILED) return PyErr_SetExcFromWindowsErr(PyExc_IOError, 0); /* else if (sigint_event != NULL && result == WAIT_OBJECT_0 + nhandles - 1) { errno = EINTR; return PyErr_SetFromErrno(PyExc_IOError); }*/ return PyLong_FromLong((int) result); } static PyObject * win32_GetCurrentProcess(PyObject* self, PyObject* args) { if (! 
PyArg_ParseTuple(args, ":GetCurrentProcess")) return NULL; return HANDLE_TO_PYNUM(GetCurrentProcess()); } static PyObject * win32_DuplicateHandle(PyObject* self, PyObject* args) { HANDLE target_handle; BOOL result; HANDLE source_process_handle; HANDLE source_handle; HANDLE target_process_handle; DWORD desired_access; BOOL inherit_handle; DWORD options = 0; if (! PyArg_ParseTuple(args, F_HANDLE F_HANDLE F_HANDLE F_DWORD F_BOOL F_DWORD ":DuplicateHandle", &source_process_handle, &source_handle, &target_process_handle, &desired_access, &inherit_handle, &options)) return NULL; Py_BEGIN_ALLOW_THREADS result = DuplicateHandle( source_process_handle, source_handle, target_process_handle, &target_handle, desired_access, inherit_handle, options ); Py_END_ALLOW_THREADS if (! result) return PyErr_SetFromWindowsErr(GetLastError()); return HANDLE_TO_PYNUM(target_handle); } static PyObject * win32_CreatePipe(PyObject* self, PyObject* args) { HANDLE read_pipe; HANDLE write_pipe; BOOL result; PyObject* pipe_attributes; /* ignored */ DWORD size; if (! PyArg_ParseTuple(args, "O" F_DWORD ":CreatePipe", &pipe_attributes, &size)) return NULL; Py_BEGIN_ALLOW_THREADS result = CreatePipe(&read_pipe, &write_pipe, NULL, size); Py_END_ALLOW_THREADS if (! result) return PyErr_SetFromWindowsErr(GetLastError()); return Py_BuildValue( "NN", HANDLE_TO_PYNUM(read_pipe), HANDLE_TO_PYNUM(write_pipe)); } static PyObject * win32_WaitForSingleObject(PyObject* self, PyObject* args) { DWORD result; HANDLE handle; DWORD milliseconds; if (! PyArg_ParseTuple(args, F_HANDLE F_DWORD ":WaitForSingleObject", &handle, &milliseconds)) return NULL; Py_BEGIN_ALLOW_THREADS result = WaitForSingleObject(handle, milliseconds); Py_END_ALLOW_THREADS if (result == WAIT_FAILED) return PyErr_SetFromWindowsErr(GetLastError()); return PyLong_FromUnsignedLong(result); } static PyObject * win32_GetExitCodeProcess(PyObject* self, PyObject* args) { DWORD exit_code; BOOL result; HANDLE process; if (! PyArg_ParseTuple(args, F_HANDLE ":GetExitCodeProcess", &process)) return NULL; result = GetExitCodeProcess(process, &exit_code); if (! result) return PyErr_SetFromWindowsErr(GetLastError()); return PyLong_FromUnsignedLong(exit_code); } static PyObject * win32_TerminateProcess(PyObject* self, PyObject* args) { BOOL result; HANDLE process; UINT exit_code; if (! PyArg_ParseTuple(args, F_HANDLE F_UINT ":TerminateProcess", &process, &exit_code)) return NULL; result = TerminateProcess(process, exit_code); if (! 
result) return PyErr_SetFromWindowsErr(GetLastError()); Py_INCREF(Py_None); return Py_None; } static PyMethodDef win32_functions[] = { {"CloseHandle", win32_CloseHandle, METH_VARARGS, ""}, {"GetLastError", win32_GetLastError, METH_NOARGS, ""}, {"OpenProcess", win32_OpenProcess, METH_VARARGS, ""}, {"ExitProcess", win32_ExitProcess, METH_VARARGS, ""}, {"ConnectNamedPipe", (PyCFunction)win32_ConnectNamedPipe, METH_VARARGS | METH_KEYWORDS, ""}, {"CreateFile", win32_CreateFile, METH_VARARGS, ""}, {"WriteFile", (PyCFunction)win32_WriteFile, METH_VARARGS | METH_KEYWORDS, ""}, {"ReadFile", (PyCFunction)win32_ReadFile, METH_VARARGS | METH_KEYWORDS, ""}, {"CreateNamedPipe", win32_CreateNamedPipe, METH_VARARGS, ""}, {"SetNamedPipeHandleState", win32_SetNamedPipeHandleState, METH_VARARGS, ""}, {"WaitNamedPipe", win32_WaitNamedPipe, METH_VARARGS, ""}, {"PeekNamedPipe", win32_PeekNamedPipe, METH_VARARGS, ""}, {"WaitForMultipleObjects", win32_WaitForMultipleObjects, METH_VARARGS, ""}, {"WaitForSingleObject", win32_WaitForSingleObject, METH_VARARGS, ""}, {"GetCurrentProcess", win32_GetCurrentProcess, METH_VARARGS, ""}, {"GetExitCodeProcess", win32_GetExitCodeProcess, METH_VARARGS, ""}, {"TerminateProcess", win32_TerminateProcess, METH_VARARGS, ""}, {"DuplicateHandle", win32_DuplicateHandle, METH_VARARGS, ""}, {"CreatePipe", win32_CreatePipe, METH_VARARGS, ""}, {NULL} }; #define WIN32_CONSTANT(fmt, con) \ PyDict_SetItemString(d, #con, Py_BuildValue(fmt, con)) PyMODINIT_FUNC init_winapi(void) { PyObject *d; PyObject *m; if (PyType_Ready(&OverlappedType) < 0) return NULL; m = Py_InitModule("_winapi", win32_functions); if (!m) return; d = PyModule_GetDict(m); PyDict_SetItemString(d, "Overlapped", (PyObject *) &OverlappedType); /* constants */ WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS); WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY); WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED); WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT); WIN32_CONSTANT(F_DWORD, ERROR_MORE_DATA); WIN32_CONSTANT(F_DWORD, ERROR_BROKEN_PIPE); WIN32_CONSTANT(F_DWORD, ERROR_IO_PENDING); WIN32_CONSTANT(F_DWORD, ERROR_NETNAME_DELETED); WIN32_CONSTANT(F_DWORD, GENERIC_READ); WIN32_CONSTANT(F_DWORD, GENERIC_WRITE); WIN32_CONSTANT(F_DWORD, DUPLICATE_SAME_ACCESS); WIN32_CONSTANT(F_DWORD, DUPLICATE_CLOSE_SOURCE); WIN32_CONSTANT(F_DWORD, INFINITE); WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER); WIN32_CONSTANT(F_DWORD, OPEN_EXISTING); WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX); WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND); WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE); WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE); WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES); WIN32_CONSTANT(F_DWORD, PIPE_WAIT); WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS); WIN32_CONSTANT(F_DWORD, PROCESS_DUP_HANDLE); WIN32_CONSTANT(F_DWORD, WAIT_OBJECT_0); WIN32_CONSTANT(F_DWORD, WAIT_ABANDONED_0); WIN32_CONSTANT(F_DWORD, WAIT_TIMEOUT); WIN32_CONSTANT(F_DWORD, FILE_FLAG_FIRST_PIPE_INSTANCE); WIN32_CONSTANT(F_DWORD, FILE_FLAG_OVERLAPPED); WIN32_CONSTANT("i", NULL); } billiard-3.5.0.3/Modules/_billiard/multiprocessing.h0000644000175000017500000001013713132743245022327 0ustar omeromer00000000000000#ifndef MULTIPROCESSING_H #define MULTIPROCESSING_H #define PY_SSIZE_T_CLEAN #ifdef __sun /* The control message API is only available on Solaris if XPG 4.2 or later is requested. 
*/ #define _XOPEN_SOURCE 500 #endif #include "Python.h" #include "structmember.h" #include "pythread.h" /* * Platform includes and definitions */ #ifdef MS_WINDOWS # define WIN32_LEAN_AND_MEAN # include # include # include /* getpid() */ # ifdef Py_DEBUG # include # endif # define SEM_HANDLE HANDLE # define SEM_VALUE_MAX LONG_MAX #else # include /* O_CREAT and O_EXCL */ # include # include # include # include /* htonl() and ntohl() */ # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) # include typedef sem_t *SEM_HANDLE; # endif # define HANDLE int # define SOCKET int # define BOOL int # define UINT32 uint32_t # define INT32 int32_t # define TRUE 1 # define FALSE 0 # define INVALID_HANDLE_VALUE (-1) #endif /* * Issue 3110 - Solaris does not define SEM_VALUE_MAX */ #ifndef SEM_VALUE_MAX #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) #elif defined(_SEM_VALUE_MAX) # define SEM_VALUE_MAX _SEM_VALUE_MAX #elif defined(_POSIX_SEM_VALUE_MAX) # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX #else # define SEM_VALUE_MAX INT_MAX #endif #endif /* * Make sure Py_ssize_t available */ #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; # define PY_SSIZE_T_MAX INT_MAX # define PY_SSIZE_T_MIN INT_MIN # define F_PY_SSIZE_T "i" # define PyInt_FromSsize_t(n) PyInt_FromLong((long)n) #else # define F_PY_SSIZE_T "n" #endif /* * Format codes */ #if SIZEOF_VOID_P == SIZEOF_LONG # define F_POINTER "k" # define T_POINTER T_ULONG #elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG) # define F_POINTER "K" # define T_POINTER T_ULONGLONG #else # error "can't find format code for unsigned integer of same size as void*" #endif #ifdef MS_WINDOWS # define F_HANDLE F_POINTER # define T_HANDLE T_POINTER # define F_SEM_HANDLE F_HANDLE # define T_SEM_HANDLE T_HANDLE # define F_DWORD "k" # define T_DWORD T_ULONG #else # define F_HANDLE "i" # define T_HANDLE T_INT # define F_SEM_HANDLE F_POINTER # define T_SEM_HANDLE T_POINTER #endif #if PY_VERSION_HEX >= 0x03000000 # define F_RBUFFER "y" #else # define F_RBUFFER "s" #endif /* * Error codes which can be returned by functions called without GIL */ #define MP_SUCCESS (0) #define MP_STANDARD_ERROR (-1) #define MP_MEMORY_ERROR (-1001) #define MP_END_OF_FILE (-1002) #define MP_EARLY_END_OF_FILE (-1003) #define MP_BAD_MESSAGE_LENGTH (-1004) #define MP_SOCKET_ERROR (-1005) #define MP_EXCEPTION_HAS_BEEN_SET (-1006) PyObject *Billiard_SetError(PyObject *Type, int num); /* * Externs - not all will really exist on all platforms */ extern PyObject *Billiard_BufferTooShort; extern PyTypeObject BilliardSemLockType; extern PyObject *Billiard_semlock_unlink(PyObject *ignore, PyObject *args); extern HANDLE sigint_event; /* * Py3k compatibility */ #if PY_VERSION_HEX >= 0x03000000 # define PICKLE_MODULE "pickle" # define FROM_FORMAT PyUnicode_FromFormat # define PyInt_FromLong PyLong_FromLong # define PyInt_FromSsize_t PyLong_FromSsize_t #else # define PICKLE_MODULE "cPickle" # define FROM_FORMAT PyString_FromFormat #endif #ifndef PyVarObject_HEAD_INIT # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, #endif #ifndef Py_TPFLAGS_HAVE_WEAKREFS # define Py_TPFLAGS_HAVE_WEAKREFS 0 #endif /* * Connection definition */ #define CONNECTION_BUFFER_SIZE 131072 typedef struct { PyObject_HEAD HANDLE handle; int flags; PyObject *weakreflist; char buffer[CONNECTION_BUFFER_SIZE]; } BilliardConnectionObject; /* * Miscellaneous */ #define MAX_MESSAGE_LENGTH 0x7fffffff 
#ifndef Py_MIN # define Py_MIN(x, y) (((x) > (y)) ? (y) : (x)) #endif #endif /* MULTIPROCESSING_H */ billiard-3.5.0.3/requirements/0000755000175000017500000000000013132746522016120 5ustar omeromer00000000000000billiard-3.5.0.3/requirements/test-ci.txt0000644000175000017500000000001313132743245020222 0ustar omeromer00000000000000pytest-cov billiard-3.5.0.3/requirements/pkgutils.txt0000644000175000017500000000011113132743245020513 0ustar omeromer00000000000000setuptools>=20.6.7 wheel>=0.29.0 flake8>=2.5.4 flakeplus>=1.1 tox>=2.3.1 billiard-3.5.0.3/requirements/test.txt0000644000175000017500000000003013132743245017630 0ustar omeromer00000000000000case>=1.3.1 pytest>=3.0 billiard-3.5.0.3/setup.py0000644000175000017500000002025413132743245015111 0ustar omeromer00000000000000from __future__ import print_function import os import sys import glob import setuptools import setuptools.command.test from distutils import sysconfig from distutils.errors import ( CCompilerError, DistutilsExecError, DistutilsPlatformError ) HERE = os.path.dirname(os.path.abspath(__file__)) ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) if sys.platform == 'win32' and sys.version_info >= (2, 6): # distutils.msvc9compiler can raise IOError if the compiler is missing ext_errors += (IOError, ) is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') is_py3k = sys.version_info[0] == 3 BUILD_WARNING = """ ----------------------------------------------------------------------- WARNING: The C extensions could not be compiled ----------------------------------------------------------------------- Maybe you do not have a C compiler installed on this system? The reason was: %s This is just a warning as most of the functionality will work even without the updated C extension. It will simply fallback to the built-in _multiprocessing module. Most notably you will not be able to use FORCE_EXECV on POSIX systems. If this is a problem for you then please install a C compiler or fix the error(s) above. 
----------------------------------------------------------------------- """ # -*- py3k -*- extras = {} # -*- Distribution Meta -*- import re re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)') re_doc = re.compile(r'^"""(.+?)"""') rq = lambda s: s.strip("\"'") def add_default(m): attr_name, attr_value = m.groups() return ((attr_name, rq(attr_value)), ) def add_version(m): v = list(map(rq, m.groups()[0].split(', '))) return (('VERSION', '.'.join(v[0:4]) + ''.join(v[4:])), ) def add_doc(m): return (('doc', m.groups()[0]), ) pats = {re_meta: add_default, re_vers: add_version, re_doc: add_doc} here = os.path.abspath(os.path.dirname(__file__)) meta_fh = open(os.path.join(here, 'billiard/__init__.py')) try: meta = {} for line in meta_fh: if line.strip() == '# -eof meta-': break for pattern, handler in pats.items(): m = pattern.match(line.strip()) if m: meta.update(handler(m)) finally: meta_fh.close() if sys.version_info < (2, 7): raise ValueError('Versions of Python before 2.7 are not supported') if sys.platform == 'win32': # Windows macros = dict() libraries = ['ws2_32'] elif sys.platform.startswith('darwin'): # macOS macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, HAVE_BROKEN_SEM_GETVALUE=1 ) libraries = [] elif sys.platform.startswith('cygwin'): # Cygwin macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=0, HAVE_BROKEN_SEM_UNLINK=1 ) libraries = [] elif sys.platform in ('freebsd4', 'freebsd5', 'freebsd6'): # FreeBSD's P1003.1b semaphore support is very experimental # and has many known problems. (as of June 2008) macros = dict( # FreeBSD 4-6 HAVE_SEM_OPEN=0, HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, ) libraries = [] elif re.match('^(gnukfreebsd(8|9|10|11)|freebsd(7|8|9|0))', sys.platform): macros = dict( # FreeBSD 7+ and GNU/kFreeBSD 8+ HAVE_SEM_OPEN=bool( sysconfig.get_config_var('HAVE_SEM_OPEN') and not bool(sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')) ), HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1, ) libraries = [] elif sys.platform.startswith('openbsd'): macros = dict( # OpenBSD HAVE_SEM_OPEN=0, # Not implemented HAVE_SEM_TIMEDWAIT=0, HAVE_FD_TRANSFER=1, ) libraries = [] else: # Linux and other unices macros = dict( HAVE_SEM_OPEN=1, HAVE_SEM_TIMEDWAIT=1, HAVE_FD_TRANSFER=1, ) libraries = ['rt'] if sys.platform == 'win32': multiprocessing_srcs = [ 'Modules/_billiard/multiprocessing.c', 'Modules/_billiard/semaphore.c', 'Modules/_billiard/win32_functions.c', ] else: multiprocessing_srcs = [ 'Modules/_billiard/multiprocessing.c', ] if macros.get('HAVE_SEM_OPEN', False): multiprocessing_srcs.append('Modules/_billiard/semaphore.c') long_description = open(os.path.join(HERE, 'README.rst')).read() # -*- Installation Requires -*- py_version = sys.version_info is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') def strip_comments(l): return l.split('#', 1)[0].strip() def reqs(f): return list(filter(None, [strip_comments(l) for l in open( os.path.join(os.getcwd(), 'requirements', f)).readlines()])) def _is_build_command(argv=sys.argv, cmds=('install', 'build', 'bdist')): for arg in argv: if arg.startswith(cmds): return arg class pytest(setuptools.command.test.test): user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')] def initialize_options(self): setuptools.command.test.test.initialize_options(self) self.pytest_args = [] def run_tests(self): import pytest sys.exit(pytest.main(self.pytest_args)) def run_setup(with_extensions=True): extensions = [] 
if with_extensions: extensions = [ setuptools.Extension( '_billiard', sources=multiprocessing_srcs, define_macros=macros.items(), libraries=libraries, include_dirs=['Modules/_billiard'], depends=glob.glob('Modules/_billiard/*.h') + ['setup.py'], ), ] if sys.platform == 'win32': extensions.append( setuptools.Extension( '_winapi', sources=multiprocessing_srcs, define_macros=macros.items(), libraries=libraries, include_dirs=['Modules/_billiard'], depends=glob.glob('Modules/_billiard/*.h') + ['setup.py'], ), ) packages = setuptools.find_packages(exclude=['ez_setup', 't', 't.*']) setuptools.setup( name='billiard', version=meta['VERSION'], description=meta['doc'], long_description=long_description, packages=packages, ext_modules=extensions, author=meta['author'], author_email=meta['author_email'], keywords='multiprocessing pool process', maintainer=meta['maintainer'], maintainer_email=meta['contact'], url=meta['homepage'], zip_safe=False, license='BSD', tests_require=reqs('test.txt'), cmdclass={'test': pytest}, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Programming Language :: Python', 'Programming Language :: C', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: Jython', 'Programming Language :: Python :: Implementation :: PyPy', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'License :: OSI Approved :: BSD License', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Distributed Computing', ], **extras ) try: run_setup(not (is_jython or is_pypy or is_py3k)) except BaseException: if _is_build_command(sys.argv): import traceback print(BUILD_WARNING % '\n'.join(traceback.format_stack()), file=sys.stderr) run_setup(False) else: raise billiard-3.5.0.3/setup.cfg0000644000175000017500000000034413132746522015217 0ustar omeromer00000000000000[tool:pytest] testpaths = t/unit/ python_classes = test_* [flake8] ignore = N806, N802, N801, N803, E305 [pep257] ignore = D102,D104,D203,D105,D213 [metadata] license_file = LICENSE.txt [egg_info] tag_build = tag_date = 0 billiard-3.5.0.3/PKG-INFO0000644000175000017500000001001513132746522014467 0ustar omeromer00000000000000Metadata-Version: 1.1 Name: billiard Version: 3.5.0.3 Summary: Python multiprocessing fork with improvements and bugfixes Home-page: http://github.com/celery/billiard Author: Ask Solem Author-email: ask@celeryproject.org License: BSD Description: ======== billiard ======== :version: 3.5.0.2 |build-status-lin| |build-status-win| |license| |wheel| |pyversion| |pyimp| .. |build-status-lin| image:: https://secure.travis-ci.org/celery/billiard.png?branch=master :alt: Build status on Linux :target: https://travis-ci.org/celery/billiard .. |build-status-win| image:: https://ci.appveyor.com/api/projects/status/github/celery/billiard?png=true&branch=master :alt: Build status on Windows :target: https://ci.appveyor.com/project/ask/billiard .. |license| image:: https://img.shields.io/pypi/l/billiard.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/billiard.svg :alt: Billiard can be installed via wheel :target: http://pypi.python.org/pypi/billiard .. 
|pyversion| image:: https://img.shields.io/pypi/pyversions/billiard.svg :alt: Supported Python versions. :target: http://pypi.python.org/pypi/billiard .. |pyimp| image:: https://img.shields.io/pypi/implementation/billiard.svg :alt: Support Python implementations. :target: http://pypi.python.org/pypi/billiard About ----- `billiard` is a fork of the Python 2.7 `multiprocessing `_ package. The multiprocessing package itself is a renamed and updated version of R Oudkerk's `pyprocessing `_ package. This standalone variant draws its fixes/improvements from python-trunk and provides additional bug fixes and improvements. - This package would not be possible if not for the contributions of not only the current maintainers but all of the contributors to the original pyprocessing package listed `here `_ - Also it is a fork of the multiprocessing backport package by Christian Heims. - It includes the no-execv patch contributed by R. Oudkerk. - And the Pool improvements previously located in `Celery`_. - Billiard is used in and is a dependency for `Celery`_ and is maintained by the Celery team. .. _`Celery`: http://celeryproject.org Bug reporting ------------- Please report bugs related to multiprocessing at the `Python bug tracker `_. Issues related to billiard should be reported at http://github.com/celery/billiard/issues. Keywords: multiprocessing pool process Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python Classifier: Programming Language :: C Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: System :: Distributed Computing billiard-3.5.0.3/LICENSE.txt0000644000175000017500000000271313132743245015222 0ustar omeromer00000000000000Copyright (c) 2006-2008, R Oudkerk and Contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. billiard-3.5.0.3/README.rst0000644000175000017500000000446713132743245015076 0ustar omeromer00000000000000======== billiard ======== :version: 3.5.0.2 |build-status-lin| |build-status-win| |license| |wheel| |pyversion| |pyimp| .. |build-status-lin| image:: https://secure.travis-ci.org/celery/billiard.png?branch=master :alt: Build status on Linux :target: https://travis-ci.org/celery/billiard .. |build-status-win| image:: https://ci.appveyor.com/api/projects/status/github/celery/billiard?png=true&branch=master :alt: Build status on Windows :target: https://ci.appveyor.com/project/ask/billiard .. |license| image:: https://img.shields.io/pypi/l/billiard.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/billiard.svg :alt: Billiard can be installed via wheel :target: http://pypi.python.org/pypi/billiard .. |pyversion| image:: https://img.shields.io/pypi/pyversions/billiard.svg :alt: Supported Python versions. :target: http://pypi.python.org/pypi/billiard .. |pyimp| image:: https://img.shields.io/pypi/implementation/billiard.svg :alt: Support Python implementations. :target: http://pypi.python.org/pypi/billiard About ----- `billiard` is a fork of the Python 2.7 `multiprocessing `_ package. The multiprocessing package itself is a renamed and updated version of R Oudkerk's `pyprocessing `_ package. This standalone variant draws its fixes/improvements from python-trunk and provides additional bug fixes and improvements. - This package would not be possible if not for the contributions of not only the current maintainers but all of the contributors to the original pyprocessing package listed `here `_ - Also it is a fork of the multiprocessing backport package by Christian Heims. - It includes the no-execv patch contributed by R. Oudkerk. - And the Pool improvements previously located in `Celery`_. - Billiard is used in and is a dependency for `Celery`_ and is maintained by the Celery team. .. _`Celery`: http://celeryproject.org Bug reporting ------------- Please report bugs related to multiprocessing at the `Python bug tracker `_. Issues related to billiard should be reported at http://github.com/celery/billiard/issues. billiard-3.5.0.3/Doc/0000755000175000017500000000000013132746522014102 5ustar omeromer00000000000000billiard-3.5.0.3/Doc/conf.py0000644000175000017500000001374713132743245015414 0ustar omeromer00000000000000# -*- coding: utf-8 -*- # # multiprocessing documentation build configuration file, created by # sphinx-quickstart on Wed Nov 26 12:47:00 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If your extensions are in another directory, add it here. 
If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. #sys.path.append(os.path.abspath('.')) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'multiprocessing' copyright = u'2008, Python Software Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. import os import sys sys.path.insert(0, os.path.join(os.getcwd(), os.pardir)) import billiard # # The short X.Y version. version = billiard.__version__ # The full version, including alpha/beta/rc tags. release = billiard.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'default.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/. #html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'multiprocessingdoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). latex_documents = [ ('index', 'multiprocessing.tex', ur'multiprocessing Documentation', ur'Python Software Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True billiard-3.5.0.3/Doc/index.rst0000644000175000017500000000074013132743245015743 0ustar omeromer00000000000000.. multiprocessing documentation master file, created by sphinx-quickstart on Wed Nov 26 12:47:00 2008. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to multiprocessing's documentation! =========================================== Contents: .. toctree:: library/multiprocessing.rst glossary.rst Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` billiard-3.5.0.3/Doc/includes/0000755000175000017500000000000013132746522015710 5ustar omeromer00000000000000billiard-3.5.0.3/Doc/includes/mp_newtype.py0000644000175000017500000000435413132743245020456 0ustar omeromer00000000000000# # This module shows how to use arbitrary callables with a subclass of # `BaseManager`. # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. 
# from multiprocessing import freeze_support from multiprocessing.managers import BaseManager, BaseProxy import operator ## class Foo(object): def f(self): print 'you called Foo.f()' def g(self): print 'you called Foo.g()' def _h(self): print 'you called Foo._h()' # A simple generator function def baz(): for i in xrange(10): yield i*i # Proxy type for generator objects class GeneratorProxy(BaseProxy): _exposed_ = ('next', '__next__') def __iter__(self): return self def next(self): return self._callmethod('next') def __next__(self): return self._callmethod('__next__') # Function to return the operator module def get_operator_module(): return operator ## class MyManager(BaseManager): pass # register the Foo class; make `f()` and `g()` accessible via proxy MyManager.register('Foo1', Foo) # register the Foo class; make `g()` and `_h()` accessible via proxy MyManager.register('Foo2', Foo, exposed=('g', '_h')) # register the generator function baz; use `GeneratorProxy` to make proxies MyManager.register('baz', baz, proxytype=GeneratorProxy) # register get_operator_module(); make public functions accessible via proxy MyManager.register('operator', get_operator_module) ## def test(): manager = MyManager() manager.start() print '-' * 20 f1 = manager.Foo1() f1.f() f1.g() assert not hasattr(f1, '_h') assert sorted(f1._exposed_) == sorted(['f', 'g']) print '-' * 20 f2 = manager.Foo2() f2.g() f2._h() assert not hasattr(f2, 'f') assert sorted(f2._exposed_) == sorted(['g', '_h']) print '-' * 20 it = manager.baz() for i in it: print '<%d>' % i, print print '-' * 20 op = manager.operator() print 'op.add(23, 45) =', op.add(23, 45) print 'op.pow(2, 94) =', op.pow(2, 94) print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6) print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3) print 'op._exposed_ =', op._exposed_ ## if __name__ == '__main__': freeze_support() test() billiard-3.5.0.3/Doc/includes/__init__.py0000644000175000017500000000001313132743245020012 0ustar omeromer00000000000000# package billiard-3.5.0.3/Doc/includes/mp_synchronize.py0000644000175000017500000001401213132743245021326 0ustar omeromer00000000000000# # A test file for the `multiprocessing` package # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. 
# import time, sys, random from Queue import Empty import multiprocessing # may get overwritten #### TEST_VALUE def value_func(running, mutex): random.seed() time.sleep(random.random()*4) mutex.acquire() print '\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished' running.value -= 1 mutex.release() def test_value(): TASKS = 10 running = multiprocessing.Value('i', TASKS) mutex = multiprocessing.Lock() for i in range(TASKS): p = multiprocessing.Process(target=value_func, args=(running, mutex)) p.start() while running.value > 0: time.sleep(0.08) mutex.acquire() print running.value, sys.stdout.flush() mutex.release() print print 'No more running processes' #### TEST_QUEUE def queue_func(queue): for i in range(30): time.sleep(0.5 * random.random()) queue.put(i*i) queue.put('STOP') def test_queue(): q = multiprocessing.Queue() p = multiprocessing.Process(target=queue_func, args=(q,)) p.start() o = None while o != 'STOP': try: o = q.get(timeout=0.3) print o, sys.stdout.flush() except Empty: print 'TIMEOUT' print #### TEST_CONDITION def condition_func(cond): cond.acquire() print '\t' + str(cond) time.sleep(2) print '\tchild is notifying' print '\t' + str(cond) cond.notify() cond.release() def test_condition(): cond = multiprocessing.Condition() p = multiprocessing.Process(target=condition_func, args=(cond,)) print cond cond.acquire() print cond cond.acquire() print cond p.start() print 'main is waiting' cond.wait() print 'main has woken up' print cond cond.release() print cond cond.release() p.join() print cond #### TEST_SEMAPHORE def semaphore_func(sema, mutex, running): sema.acquire() mutex.acquire() running.value += 1 print running.value, 'tasks are running' mutex.release() random.seed() time.sleep(random.random()*2) mutex.acquire() running.value -= 1 print '%s has finished' % multiprocessing.current_process() mutex.release() sema.release() def test_semaphore(): sema = multiprocessing.Semaphore(3) mutex = multiprocessing.RLock() running = multiprocessing.Value('i', 0) processes = [ multiprocessing.Process(target=semaphore_func, args=(sema, mutex, running)) for i in range(10) ] for p in processes: p.start() for p in processes: p.join() #### TEST_JOIN_TIMEOUT def join_timeout_func(): print '\tchild sleeping' time.sleep(5.5) print '\n\tchild terminating' def test_join_timeout(): p = multiprocessing.Process(target=join_timeout_func) p.start() print 'waiting for process to finish' while 1: p.join(timeout=1) if not p.is_alive(): break print '.', sys.stdout.flush() #### TEST_EVENT def event_func(event): print '\t%r is waiting' % multiprocessing.current_process() event.wait() print '\t%r has woken up' % multiprocessing.current_process() def test_event(): event = multiprocessing.Event() processes = [multiprocessing.Process(target=event_func, args=(event,)) for i in range(5)] for p in processes: p.start() print 'main is sleeping' time.sleep(2) print 'main is setting event' event.set() for p in processes: p.join() #### TEST_SHAREDVALUES def sharedvalues_func(values, arrays, shared_values, shared_arrays): for i in range(len(values)): v = values[i][1] sv = shared_values[i].value assert v == sv for i in range(len(values)): a = arrays[i][1] sa = list(shared_arrays[i][:]) assert a == sa print 'Tests passed' def test_sharedvalues(): values = [ ('i', 10), ('h', -2), ('d', 1.25) ] arrays = [ ('i', range(100)), ('d', [0.25 * i for i in range(100)]), ('H', range(1000)) ] shared_values = [multiprocessing.Value(id, v) for id, v in values] shared_arrays = [multiprocessing.Array(id, a) for id, a in 
arrays] p = multiprocessing.Process( target=sharedvalues_func, args=(values, arrays, shared_values, shared_arrays) ) p.start() p.join() assert p.exitcode == 0 #### def test(namespace=multiprocessing): global multiprocessing multiprocessing = namespace for func in [ test_value, test_queue, test_condition, test_semaphore, test_join_timeout, test_event, test_sharedvalues ]: print '\n\t######## %s\n' % func.__name__ func() ignore = multiprocessing.active_children() # cleanup any old processes if hasattr(multiprocessing, '_debug_info'): info = multiprocessing._debug_info() if info: print info raise ValueError, 'there should be no positive refcounts left' if __name__ == '__main__': multiprocessing.freeze_support() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print ' Using processes '.center(79, '-') namespace = multiprocessing elif sys.argv[1] == 'manager': print ' Using processes and a manager '.center(79, '-') namespace = multiprocessing.Manager() namespace.Process = multiprocessing.Process namespace.current_process = multiprocessing.current_process namespace.active_children = multiprocessing.active_children elif sys.argv[1] == 'threads': print ' Using threads '.center(79, '-') import multiprocessing.dummy as namespace else: print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0] raise SystemExit, 2 test(namespace) billiard-3.5.0.3/Doc/includes/mp_benchmarks.py0000644000175000017500000001263713132743245021103 0ustar omeromer00000000000000# # Simple benchmarks for the multiprocessing package # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import time, sys, multiprocessing, threading, Queue, gc if sys.platform == 'win32': _timer = time.clock else: _timer = time.time delta = 1 #### TEST_QUEUESPEED def queuespeed_func(q, c, iterations): a = '0' * 256 c.acquire() c.notify() c.release() for i in xrange(iterations): q.put(a) q.put('STOP') def test_queuespeed(Process, q, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = Process(target=queuespeed_func, args=(q, c, iterations)) c.acquire() p.start() c.wait() c.release() result = None t = _timer() while result != 'STOP': result = q.get() elapsed = _timer() - t p.join() print iterations, 'objects passed through the queue in', elapsed, 'seconds' print 'average number/sec:', iterations/elapsed #### TEST_PIPESPEED def pipe_func(c, cond, iterations): a = '0' * 256 cond.acquire() cond.notify() cond.release() for i in xrange(iterations): c.send(a) c.send('STOP') def test_pipespeed(): c, d = multiprocessing.Pipe() cond = multiprocessing.Condition() elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 p = multiprocessing.Process(target=pipe_func, args=(d, cond, iterations)) cond.acquire() p.start() cond.wait() cond.release() result = None t = _timer() while result != 'STOP': result = c.recv() elapsed = _timer() - t p.join() print iterations, 'objects passed through connection in',elapsed,'seconds' print 'average number/sec:', iterations/elapsed #### TEST_SEQSPEED def test_seqspeed(seq): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in xrange(iterations): a = seq[5] elapsed = _timer()-t print iterations, 'iterations in', elapsed, 'seconds' print 'average number/sec:', iterations/elapsed #### TEST_LOCK def test_lockspeed(l): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 t = _timer() for i in xrange(iterations): l.acquire() l.release() elapsed = _timer()-t print iterations, 'iterations in', elapsed, 
'seconds' print 'average number/sec:', iterations/elapsed #### TEST_CONDITION def conditionspeed_func(c, N): c.acquire() c.notify() for i in xrange(N): c.wait() c.notify() c.release() def test_conditionspeed(Process, c): elapsed = 0 iterations = 1 while elapsed < delta: iterations *= 2 c.acquire() p = Process(target=conditionspeed_func, args=(c, iterations)) p.start() c.wait() t = _timer() for i in xrange(iterations): c.notify() c.wait() elapsed = _timer()-t c.release() p.join() print iterations * 2, 'waits in', elapsed, 'seconds' print 'average number/sec:', iterations * 2 / elapsed #### def test(): manager = multiprocessing.Manager() gc.disable() print '\n\t######## testing Queue.Queue\n' test_queuespeed(threading.Thread, Queue.Queue(), threading.Condition()) print '\n\t######## testing multiprocessing.Queue\n' test_queuespeed(multiprocessing.Process, multiprocessing.Queue(), multiprocessing.Condition()) print '\n\t######## testing Queue managed by server process\n' test_queuespeed(multiprocessing.Process, manager.Queue(), manager.Condition()) print '\n\t######## testing multiprocessing.Pipe\n' test_pipespeed() print print '\n\t######## testing list\n' test_seqspeed(range(10)) print '\n\t######## testing list managed by server process\n' test_seqspeed(manager.list(range(10))) print '\n\t######## testing Array("i", ..., lock=False)\n' test_seqspeed(multiprocessing.Array('i', range(10), lock=False)) print '\n\t######## testing Array("i", ..., lock=True)\n' test_seqspeed(multiprocessing.Array('i', range(10), lock=True)) print print '\n\t######## testing threading.Lock\n' test_lockspeed(threading.Lock()) print '\n\t######## testing threading.RLock\n' test_lockspeed(threading.RLock()) print '\n\t######## testing multiprocessing.Lock\n' test_lockspeed(multiprocessing.Lock()) print '\n\t######## testing multiprocessing.RLock\n' test_lockspeed(multiprocessing.RLock()) print '\n\t######## testing lock managed by server process\n' test_lockspeed(manager.Lock()) print '\n\t######## testing rlock managed by server process\n' test_lockspeed(manager.RLock()) print print '\n\t######## testing threading.Condition\n' test_conditionspeed(threading.Thread, threading.Condition()) print '\n\t######## testing multiprocessing.Condition\n' test_conditionspeed(multiprocessing.Process, multiprocessing.Condition()) print '\n\t######## testing condition managed by a server process\n' test_conditionspeed(multiprocessing.Process, manager.Condition()) gc.enable() if __name__ == '__main__': multiprocessing.freeze_support() test() billiard-3.5.0.3/Doc/includes/mp_webserver.py0000644000175000017500000000401213132743245020756 0ustar omeromer00000000000000# # Example where a pool of http servers share a single listening socket # # On Windows this module depends on the ability to pickle a socket # object so that the worker processes can inherit a copy of the server # object. (We import `multiprocessing.reduction` to enable this pickling.) # # Not sure if we should synchronize access to `socket.accept()` method by # using a process-shared lock -- does not seem to be necessary. # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. 
# import os import sys from multiprocessing import Process, current_process, freeze_support from BaseHTTPServer import HTTPServer from SimpleHTTPServer import SimpleHTTPRequestHandler if sys.platform == 'win32': import multiprocessing.reduction # make sockets pickable/inheritable def note(format, *args): sys.stderr.write('[%s]\t%s\n' % (current_process().name, format%args)) class RequestHandler(SimpleHTTPRequestHandler): # we override log_message() to show which process is handling the request def log_message(self, format, *args): note(format, *args) def serve_forever(server): note('starting server') try: server.serve_forever() except KeyboardInterrupt: pass def runpool(address, number_of_processes): # create a single server object -- children will each inherit a copy server = HTTPServer(address, RequestHandler) # create child processes to act as workers for i in range(number_of_processes-1): Process(target=serve_forever, args=(server,)).start() # main process also acts as a worker serve_forever(server) def test(): DIR = os.path.join(os.path.dirname(__file__), '..') ADDRESS = ('localhost', 8000) NUMBER_OF_PROCESSES = 4 print 'Serving at http://%s:%d using %d worker processes' % \ (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES) print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32'] os.chdir(DIR) runpool(ADDRESS, NUMBER_OF_PROCESSES) if __name__ == '__main__': freeze_support() test() billiard-3.5.0.3/Doc/includes/mp_pool.py0000644000175000017500000001555713132743245017743 0ustar omeromer00000000000000# # A test of `multiprocessing.Pool` class # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import multiprocessing import time import random import sys # # Functions used by test code # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % ( multiprocessing.current_process().name, func.__name__, args, result ) def calculatestar(args): return calculate(*args) def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b def f(x): return 1.0 / (x-5.0) def pow3(x): return x**3 def noop(x): pass # # Test code # def test(): print 'cpu_count() = %d\n' % multiprocessing.cpu_count() # # Create pool # PROCESSES = 4 print 'Creating pool with %d processes\n' % PROCESSES pool = multiprocessing.Pool(PROCESSES) print 'pool = %s' % pool print # # Tests # TASKS = [(mul, (i, 7)) for i in range(10)] + \ [(plus, (i, 8)) for i in range(10)] results = [pool.apply_async(calculate, t) for t in TASKS] imap_it = pool.imap(calculatestar, TASKS) imap_unordered_it = pool.imap_unordered(calculatestar, TASKS) print 'Ordered results using pool.apply_async():' for r in results: print '\t', r.get() print print 'Ordered results using pool.imap():' for x in imap_it: print '\t', x print print 'Unordered results using pool.imap_unordered():' for x in imap_unordered_it: print '\t', x print print 'Ordered results using pool.map() --- will block till complete:' for x in pool.map(calculatestar, TASKS): print '\t', x print # # Simple benchmarks # N = 100000 print 'def pow3(x): return x**3' t = time.time() A = map(pow3, xrange(N)) print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \ (N, time.time() - t) t = time.time() B = pool.map(pow3, xrange(N)) print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \ (N, time.time() - t) t = time.time() C = list(pool.imap(pow3, xrange(N), chunksize=N//8)) print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \ ' seconds' % (N, N//8, time.time() - t) assert A == B == C, 
(len(A), len(B), len(C)) print L = [None] * 1000000 print 'def noop(x): pass' print 'L = [None] * 1000000' t = time.time() A = map(noop, L) print '\tmap(noop, L):\n\t\t%s seconds' % \ (time.time() - t) t = time.time() B = pool.map(noop, L) print '\tpool.map(noop, L):\n\t\t%s seconds' % \ (time.time() - t) t = time.time() C = list(pool.imap(noop, L, chunksize=len(L)//8)) print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \ (len(L)//8, time.time() - t) assert A == B == C, (len(A), len(B), len(C)) print del A, B, C, L # # Test error handling # print 'Testing error handling:' try: print pool.apply(f, (5,)) except ZeroDivisionError: print '\tGot ZeroDivisionError as expected from pool.apply()' else: raise AssertionError, 'expected ZeroDivisionError' try: print pool.map(f, range(10)) except ZeroDivisionError: print '\tGot ZeroDivisionError as expected from pool.map()' else: raise AssertionError, 'expected ZeroDivisionError' try: print list(pool.imap(f, range(10))) except ZeroDivisionError: print '\tGot ZeroDivisionError as expected from list(pool.imap())' else: raise AssertionError, 'expected ZeroDivisionError' it = pool.imap(f, range(10)) for i in range(10): try: x = it.next() except ZeroDivisionError: if i == 5: pass except StopIteration: break else: if i == 5: raise AssertionError, 'expected ZeroDivisionError' assert i == 9 print '\tGot ZeroDivisionError as expected from IMapIterator.next()' print # # Testing timeouts # print 'Testing ApplyResult.get() with timeout:', res = pool.apply_async(calculate, TASKS[0]) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % res.get(0.02)) break except multiprocessing.TimeoutError: sys.stdout.write('.') print print print 'Testing IMapIterator.next() with timeout:', it = pool.imap(calculatestar, TASKS) while 1: sys.stdout.flush() try: sys.stdout.write('\n\t%s' % it.next(0.02)) except StopIteration: break except multiprocessing.TimeoutError: sys.stdout.write('.') print print # # Testing callback # print 'Testing callback:' A = [] B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729] r = pool.apply_async(mul, (7, 8), callback=A.append) r.wait() r = pool.map_async(pow3, range(10), callback=A.extend) r.wait() if A == B: print '\tcallbacks succeeded\n' else: print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B) # # Check there are no outstanding tasks # assert not pool._cache, 'cache = %r' % pool._cache # # Check close() methods # print 'Testing close():' for worker in pool._pool: assert worker.is_alive() result = pool.apply_async(time.sleep, [0.5]) pool.close() pool.join() assert result.get() is None for worker in pool._pool: assert not worker.is_alive() print '\tclose() succeeded\n' # # Check terminate() method # print 'Testing terminate():' pool = multiprocessing.Pool(2) DELTA = 0.1 ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)] pool.terminate() pool.join() for worker in pool._pool: assert not worker.is_alive() print '\tterminate() succeeded\n' # # Check garbage collection # print 'Testing garbage collection:' pool = multiprocessing.Pool(2) DELTA = 0.1 processes = pool._pool ignore = pool.apply(pow3, [2]) results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)] results = pool = None time.sleep(DELTA * 2) for worker in processes: assert not worker.is_alive() print '\tgarbage collection succeeded\n' if __name__ == '__main__': multiprocessing.freeze_support() assert len(sys.argv) in (1, 2) if len(sys.argv) == 1 or sys.argv[1] == 'processes': print ' Using processes 
'.center(79, '-') elif sys.argv[1] == 'threads': print ' Using threads '.center(79, '-') import multiprocessing.dummy as multiprocessing else: print 'Usage:\n\t%s [processes | threads]' % sys.argv[0] raise SystemExit(2) test() billiard-3.5.0.3/Doc/includes/mp_workers.py0000644000175000017500000000401513132743245020451 0ustar omeromer00000000000000# # Simple example which uses a pool of workers to carry out some tasks. # # Notice that the results will probably not come out of the output # queue in the same in the same order as the corresponding tasks were # put on the input queue. If it is important to get the results back # in the original order then consider using `Pool.map()` or # `Pool.imap()` (which will save on the amount of code needed anyway). # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # import time import random from multiprocessing import Process, Queue, current_process, freeze_support # # Function run by worker processes # def worker(input, output): for func, args in iter(input.get, 'STOP'): result = calculate(func, args) output.put(result) # # Function used to calculate result # def calculate(func, args): result = func(*args) return '%s says that %s%s = %s' % \ (current_process().name, func.__name__, args, result) # # Functions referenced by tasks # def mul(a, b): time.sleep(0.5*random.random()) return a * b def plus(a, b): time.sleep(0.5*random.random()) return a + b # # # def test(): NUMBER_OF_PROCESSES = 4 TASKS1 = [(mul, (i, 7)) for i in range(20)] TASKS2 = [(plus, (i, 8)) for i in range(10)] # Create queues task_queue = Queue() done_queue = Queue() # Submit tasks for task in TASKS1: task_queue.put(task) # Start worker processes for i in range(NUMBER_OF_PROCESSES): Process(target=worker, args=(task_queue, done_queue)).start() # Get and print results print 'Unordered results:' for i in range(len(TASKS1)): print '\t', done_queue.get() # Add more tasks using `put()` for task in TASKS2: task_queue.put(task) # Get and print some more results for i in range(len(TASKS2)): print '\t', done_queue.get() # Tell child processes to stop for i in range(NUMBER_OF_PROCESSES): task_queue.put('STOP') if __name__ == '__main__': freeze_support() test() billiard-3.5.0.3/Doc/glossary.rst0000644000175000017500000000330113132743245016473 0ustar omeromer00000000000000.. _glossary: ******** Glossary ******** .. glossary:: bytecode Python source code is compiled into bytecode, the internal representation of a Python program in the interpreter. The bytecode is also cached in ``.pyc`` and ``.pyo`` files so that executing the same file is faster the second time (recompilation from source to bytecode can be avoided). This "intermediate language" is said to run on a :term:`virtual machine` that executes the machine code corresponding to each bytecode. CPython The canonical implementation of the Python programming language. The term "CPython" is used in contexts when necessary to distinguish this implementation from others such as Jython or IronPython. GIL See :term:`global interpreter lock`. global interpreter lock The lock used by Python threads to assure that only one thread executes in the :term:`CPython` :term:`virtual machine` at a time. This simplifies the CPython implementation by assuring that no two processes can access the same memory at the same time. Locking the entire interpreter makes it easier for the interpreter to be multi-threaded, at the expense of much of the parallelism afforded by multi-processor machines. 
Efforts have been made in the past to create a "free-threaded" interpreter (one which locks shared data at a much finer granularity), but so far none have been successful because performance suffered in the common single-processor case. virtual machine A computer defined entirely in software. Python's virtual machine executes the :term:`bytecode` emitted by the bytecode compiler. billiard-3.5.0.3/Doc/library/0000755000175000017500000000000013132746522015546 5ustar omeromer00000000000000billiard-3.5.0.3/Doc/library/multiprocessing.rst0000644000175000017500000025073613132743245021543 0ustar omeromer00000000000000:mod:`multiprocessing` --- Process-based parallelism ==================================================== .. module:: multiprocessing :synopsis: Process-based parallelism. Introduction ------------ :mod:`multiprocessing` is a package that supports spawning processes using an API similar to the :mod:`threading` module. The :mod:`multiprocessing` package offers both local and remote concurrency, effectively side-stepping the :term:`Global Interpreter Lock` by using subprocesses instead of threads. Due to this, the :mod:`multiprocessing` module allows the programmer to fully leverage multiple processors on a given machine. It runs on both Unix and Windows. .. note:: Some of this package's functionality requires a functioning shared semaphore implementation on the host operating system. Without one, the :mod:`multiprocessing.synchronize` module will be disabled, and attempts to import it will result in an :exc:`ImportError`. See :issue:`3770` for additional information. .. note:: Functionality within this package requires that the ``__main__`` module be importable by the children. This is covered in :ref:`multiprocessing-programming` however it is worth pointing out here. This means that some examples, such as the :class:`multiprocessing.Pool` examples will not work in the interactive interpreter. For example:: >>> from multiprocessing import Pool >>> p = Pool(5) >>> def f(x): ... return x*x ... >>> p.map(f, [1,2,3]) Process PoolWorker-1: Process PoolWorker-2: Process PoolWorker-3: Traceback (most recent call last): Traceback (most recent call last): Traceback (most recent call last): AttributeError: 'module' object has no attribute 'f' AttributeError: 'module' object has no attribute 'f' AttributeError: 'module' object has no attribute 'f' (If you try this it will actually output three full tracebacks interleaved in a semi-random fashion, and then you may have to stop the master process somehow.) The :class:`Process` class ~~~~~~~~~~~~~~~~~~~~~~~~~~ In :mod:`multiprocessing`, processes are spawned by creating a :class:`Process` object and then calling its :meth:`~Process.start` method. :class:`Process` follows the API of :class:`threading.Thread`. A trivial example of a multiprocess program is :: from multiprocessing import Process def f(name): print('hello', name) if __name__ == '__main__': p = Process(target=f, args=('bob',)) p.start() p.join() To show the individual process IDs involved, here is an expanded example:: from multiprocessing import Process import os def info(title): print(title) print('module name:', __name__) print('parent process:', os.getppid()) print('process id:', os.getpid()) def f(name): info('function f') print('hello', name) if __name__ == '__main__': info('main line') p = Process(target=f, args=('bob',)) p.start() p.join() For an explanation of why (on Windows) the ``if __name__ == '__main__'`` part is necessary, see :ref:`multiprocessing-programming`. 
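Because this documentation is bundled with ``billiard``, which tracks the :mod:`multiprocessing` API, the same example is expected to work unchanged against the fork's top-level namespace (a minimal sketch added here for illustration, not part of the upstream documentation)::

    from billiard import Process  # assumed drop-in replacement for multiprocessing

    def f(name):
        print('hello', name)

    if __name__ == '__main__':
        p = Process(target=f, args=('bob',))
        p.start()
        p.join()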
Exchanging objects between processes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :mod:`multiprocessing` supports two types of communication channel between processes: **Queues** The :class:`Queue` class is a near clone of :class:`Queue.Queue`. For example:: from multiprocessing import Process, Queue def f(q): q.put([42, None, 'hello']) if __name__ == '__main__': q = Queue() p = Process(target=f, args=(q,)) p.start() print(q.get()) # prints "[42, None, 'hello']" p.join() Queues are thread and process safe, but note that they must never be instantiated as a side effect of importing a module: this can lead to a deadlock! (see :ref:`threaded-imports`) **Pipes** The :func:`Pipe` function returns a pair of connection objects connected by a pipe which by default is duplex (two-way). For example:: from multiprocessing import Process, Pipe def f(conn): conn.send([42, None, 'hello']) conn.close() if __name__ == '__main__': parent_conn, child_conn = Pipe() p = Process(target=f, args=(child_conn,)) p.start() print(parent_conn.recv()) # prints "[42, None, 'hello']" p.join() The two connection objects returned by :func:`Pipe` represent the two ends of the pipe. Each connection object has :meth:`~Connection.send` and :meth:`~Connection.recv` methods (among others). Note that data in a pipe may become corrupted if two processes (or threads) try to read from or write to the *same* end of the pipe at the same time. Of course there is no risk of corruption from processes using different ends of the pipe at the same time. Synchronization between processes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :mod:`multiprocessing` contains equivalents of all the synchronization primitives from :mod:`threading`. For instance one can use a lock to ensure that only one process prints to standard output at a time:: from multiprocessing import Process, Lock def f(l, i): l.acquire() print('hello world', i) l.release() if __name__ == '__main__': lock = Lock() for num in range(10): Process(target=f, args=(lock, num)).start() Without using the lock output from the different processes is liable to get all mixed up. Sharing state between processes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned above, when doing concurrent programming it is usually best to avoid using shared state as far as possible. This is particularly true when using multiple processes. However, if you really do need to use some shared data then :mod:`multiprocessing` provides a couple of ways of doing so. **Shared memory** Data can be stored in a shared memory map using :class:`Value` or :class:`Array`. For example, the following code :: from multiprocessing import Process, Value, Array def f(n, a): n.value = 3.1415927 for i in range(len(a)): a[i] = -a[i] if __name__ == '__main__': num = Value('d', 0.0) arr = Array('i', range(10)) p = Process(target=f, args=(num, arr)) p.start() p.join() print(num.value) print(arr[:]) will print :: 3.1415927 [0, -1, -2, -3, -4, -5, -6, -7, -8, -9] The ``'d'`` and ``'i'`` arguments used when creating ``num`` and ``arr`` are typecodes of the kind used by the :mod:`array` module: ``'d'`` indicates a double precision float and ``'i'`` indicates a signed integer. These shared objects will be process and thread-safe. For more flexibility in using shared memory one can use the :mod:`multiprocessing.sharedctypes` module which supports the creation of arbitrary ctypes objects allocated from shared memory. 
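As an illustrative sketch only (modelled on typical :mod:`multiprocessing.sharedctypes` usage; the ``Point`` structure is hypothetical), arbitrary ctypes objects can be shared in much the same way as :class:`Value` and :class:`Array`::

    from ctypes import Structure, c_double
    from multiprocessing import Process
    from multiprocessing.sharedctypes import Value, Array

    class Point(Structure):
        _fields_ = [('x', c_double), ('y', c_double)]

    def modify(point, points):
        # runs in the child process; mutations are visible to the parent
        point.x **= 2
        point.y **= 2
        for p in points:
            p.x, p.y = -p.y, -p.x

    if __name__ == '__main__':
        pt = Value(Point, 1.0, 2.0)                   # synchronized by default
        arr = Array(Point, [(1.0, 2.0), (3.0, 4.0)])
        proc = Process(target=modify, args=(pt, arr))
        proc.start()
        proc.join()
        print(pt.x, pt.y)
        print([(p.x, p.y) for p in arr])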
**Server process** A manager object returned by :func:`Manager` controls a server process which holds Python objects and allows other processes to manipulate them using proxies. A manager returned by :func:`Manager` will support types :class:`list`, :class:`dict`, :class:`Namespace`, :class:`Lock`, :class:`RLock`, :class:`Semaphore`, :class:`BoundedSemaphore`, :class:`Condition`, :class:`Event`, :class:`Queue`, :class:`Value` and :class:`Array`. For example, :: from multiprocessing import Process, Manager def f(d, l): d[1] = '1' d['2'] = 2 d[0.25] = None l.reverse() if __name__ == '__main__': manager = Manager() d = manager.dict() l = manager.list(range(10)) p = Process(target=f, args=(d, l)) p.start() p.join() print(d) print(l) will print :: {0.25: None, 1: '1', '2': 2} [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory. Using a pool of workers ~~~~~~~~~~~~~~~~~~~~~~~ The :class:`~multiprocessing.pool.Pool` class represents a pool of worker processes. It has methods which allows tasks to be offloaded to the worker processes in a few different ways. For example:: from multiprocessing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.apply_async(f, [10]) # evaluate "f(10)" asynchronously print(result.get(timeout=1)) # prints "100" unless your computer is *very* slow print(pool.map(f, range(10))) # prints "[0, 1, 4,..., 81]" Reference --------- The :mod:`multiprocessing` package mostly replicates the API of the :mod:`threading` module. :class:`Process` and exceptions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. class:: Process([group[, target[, name[, args[, kwargs]]]]], daemon=None) Process objects represent activity that is run in a separate process. The :class:`Process` class has equivalents of all the methods of :class:`threading.Thread`. The constructor should always be called with keyword arguments. *group* should always be ``None``; it exists solely for compatibility with :class:`threading.Thread`. *target* is the callable object to be invoked by the :meth:`run()` method. It defaults to ``None``, meaning nothing is called. *name* is the process name. By default, a unique name is constructed of the form 'Process-N\ :sub:`1`:N\ :sub:`2`:...:N\ :sub:`k`' where N\ :sub:`1`,N\ :sub:`2`,...,N\ :sub:`k` is a sequence of integers whose length is determined by the *generation* of the process. *args* is the argument tuple for the target invocation. *kwargs* is a dictionary of keyword arguments for the target invocation. If provided, the keyword-only *daemon* argument sets the process :attr:`daemon` flag to ``True`` or ``False``. If ``None`` (the default), this flag will be inherited from the creating process. By default, no arguments are passed to *target*. If a subclass overrides the constructor, it must make sure it invokes the base class constructor (:meth:`Process.__init__`) before doing anything else to the process. .. versionchanged:: 3.3 Added the *daemon* argument. .. method:: run() Method representing the process's activity. You may override this method in a subclass. 
The standard :meth:`run` method invokes the callable object passed to the object's constructor as the target argument, if any, with sequential and keyword arguments taken from the *args* and *kwargs* arguments, respectively. .. method:: start() Start the process's activity. This must be called at most once per process object. It arranges for the object's :meth:`run` method to be invoked in a separate process. .. method:: join([timeout]) If the optional argument *timeout* is ``None`` (the default), the method blocks until the process whose :meth:`join` method is called terminates. If *timeout* is a positive number, it blocks at most *timeout* seconds. A process can be joined many times. A process cannot join itself because this would cause a deadlock. It is an error to attempt to join a process before it has been started. .. attribute:: name The process's name. The name is a string used for identification purposes only. It has no semantics. Multiple processes may be given the same name. The initial name is set by the constructor. .. method:: is_alive Return whether the process is alive. Roughly, a process object is alive from the moment the :meth:`start` method returns until the child process terminates. .. attribute:: daemon The process's daemon flag, a Boolean value. This must be set before :meth:`start` is called. The initial value is inherited from the creating process. When a process exits, it attempts to terminate all of its daemonic child processes. Note that a daemonic process is not allowed to create child processes. Otherwise a daemonic process would leave its children orphaned if it gets terminated when its parent process exits. Additionally, these are **not** Unix daemons or services; they are normal processes that will be terminated (and not joined) if non-daemonic processes have exited. In addition to the :class:`threading.Thread` API, :class:`Process` objects also support the following attributes and methods: .. attribute:: pid Return the process ID. Before the process is spawned, this will be ``None``. .. attribute:: exitcode The child's exit code. This will be ``None`` if the process has not yet terminated. A negative value *-N* indicates that the child was terminated by signal *N*. .. attribute:: authkey The process's authentication key (a byte string). When :mod:`multiprocessing` is initialized the main process is assigned a random string using :func:`os.urandom`. When a :class:`Process` object is created, it will inherit the authentication key of its parent process, although this may be changed by setting :attr:`authkey` to another byte string. See :ref:`multiprocessing-auth-keys`. .. attribute:: sentinel A numeric handle of a system object which will become "ready" when the process ends. You can use this value if you want to wait on several events at once using :func:`multiprocessing.connection.wait`. Otherwise calling :meth:`join()` is simpler. On Windows, this is an OS handle usable with the ``WaitForSingleObject`` and ``WaitForMultipleObjects`` family of API calls. On Unix, this is a file descriptor usable with primitives from the :mod:`select` module. .. versionadded:: 3.3 .. method:: terminate() Terminate the process. On Unix this is done using the ``SIGTERM`` signal; on Windows :c:func:`TerminateProcess` is used. Note that exit handlers and finally clauses, etc., will not be executed. Note that descendant processes of the process will *not* be terminated -- they will simply become orphaned. ..
warning:: If this method is used when the associated process is using a pipe or queue then the pipe or queue is liable to become corrupted and may become unusable by other processes. Similarly, if the process has acquired a lock or semaphore etc. then terminating it is liable to cause other processes to deadlock. Note that the :meth:`start`, :meth:`join`, :meth:`is_alive`, :meth:`terminate` and :attr:`exitcode` methods should only be called by the process that created the process object. Example usage of some of the methods of :class:`Process`: .. doctest:: >>> import multiprocessing, time, signal >>> p = multiprocessing.Process(target=time.sleep, args=(1000,)) >>> print(p, p.is_alive()) False >>> p.start() >>> print(p, p.is_alive()) True >>> p.terminate() >>> time.sleep(0.1) >>> print(p, p.is_alive()) False >>> p.exitcode == -signal.SIGTERM True .. exception:: BufferTooShort Exception raised by :meth:`Connection.recv_bytes_into()` when the supplied buffer object is too small for the message read. If ``e`` is an instance of :exc:`BufferTooShort` then ``e.args[0]`` will give the message as a byte string. Pipes and Queues ~~~~~~~~~~~~~~~~ When using multiple processes, one generally uses message passing for communication between processes and avoids having to use any synchronization primitives like locks. For passing messages one can use :func:`Pipe` (for a connection between two processes) or a queue (which allows multiple producers and consumers). The :class:`Queue`, :class:`SimpleQueue` and :class:`JoinableQueue` types are multi-producer, multi-consumer FIFO queues modelled on the :class:`Queue.Queue` class in the standard library. They differ in that :class:`Queue` lacks the :meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join` methods introduced into Python 2.5's :class:`queue.Queue` class. If you use :class:`JoinableQueue` then you **must** call :meth:`JoinableQueue.task_done` for each task removed from the queue or else the semaphore used to count the number of unfinished tasks may eventually overflow, raising an exception. Note that one can also create a shared queue by using a manager object -- see :ref:`multiprocessing-managers`. .. note:: :mod:`multiprocessing` uses the usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions to signal a timeout. They are not available in the :mod:`multiprocessing` namespace so you need to import them from :mod:`queue`. .. warning:: If a process is killed using :meth:`Process.terminate` or :func:`os.kill` while it is trying to use a :class:`Queue`, then the data in the queue is likely to become corrupted. This may cause any other process to get an exception when it tries to use the queue later on. .. warning:: As mentioned above, if a child process has put items on a queue (and it has not used :meth:`JoinableQueue.cancel_join_thread`), then that process will not terminate until all buffered items have been flushed to the pipe. This means that if you try joining that process you may get a deadlock unless you are sure that all items which have been put on the queue have been consumed. Similarly, if the child process is non-daemonic then the parent process may hang on exit when it tries to join all its non-daemonic children. Note that a queue created using a manager does not have this issue. See :ref:`multiprocessing-programming`. For an example of the usage of queues for interprocess communication see :ref:`multiprocessing-examples`. ..
function:: Pipe([duplex]) Returns a pair ``(conn1, conn2)`` of :class:`Connection` objects representing the ends of a pipe. If *duplex* is ``True`` (the default) then the pipe is bidirectional. If *duplex* is ``False`` then the pipe is unidirectional: ``conn1`` can only be used for receiving messages and ``conn2`` can only be used for sending messages. .. class:: Queue([maxsize]) Returns a process shared queue implemented using a pipe and a few locks/semaphores. When a process first puts an item on the queue a feeder thread is started which transfers objects from a buffer into the pipe. The usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions from the standard library's :mod:`Queue` module are raised to signal timeouts. :class:`Queue` implements all the methods of :class:`Queue.Queue` except for :meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join`. .. method:: qsize() Return the approximate size of the queue. Because of multithreading/multiprocessing semantics, this number is not reliable. Note that this may raise :exc:`NotImplementedError` on Unix platforms like macOS where ``sem_getvalue()`` is not implemented. .. method:: empty() Return ``True`` if the queue is empty, ``False`` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. .. method:: full() Return ``True`` if the queue is full, ``False`` otherwise. Because of multithreading/multiprocessing semantics, this is not reliable. .. method:: put(obj[, block[, timeout]]) Put obj into the queue. If the optional argument *block* is ``True`` (the default) and *timeout* is ``None`` (the default), block if necessary until a free slot is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :exc:`queue.Full` exception if no free slot was available within that time. Otherwise (*block* is ``False``), put an item on the queue if a free slot is immediately available, else raise the :exc:`queue.Full` exception (*timeout* is ignored in that case). .. method:: put_nowait(obj) Equivalent to ``put(obj, False)``. .. method:: get([block[, timeout]]) Remove and return an item from the queue. If optional args *block* is ``True`` (the default) and *timeout* is ``None`` (the default), block if necessary until an item is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :exc:`queue.Empty` exception if no item was available within that time. Otherwise (block is ``False``), return an item if one is immediately available, else raise the :exc:`queue.Empty` exception (*timeout* is ignored in that case). .. method:: get_nowait() get_no_wait() Equivalent to ``get(False)``. :class:`multiprocessing.Queue` has a few additional methods not found in :class:`queue.Queue`. These methods are usually unnecessary for most code: .. method:: close() Indicate that no more data will be put on this queue by the current process. The background thread will quit once it has flushed all buffered data to the pipe. This is called automatically when the queue is garbage collected. .. method:: join_thread() Join the background thread. This can only be used after :meth:`close` has been called. It blocks until the background thread exits, ensuring that all data in the buffer has been flushed to the pipe. By default if a process is not the creator of the queue then on exit it will attempt to join the queue's background thread. The process can call :meth:`cancel_join_thread` to make :meth:`join_thread` do nothing. .. 
method:: cancel_join_thread() Prevent :meth:`join_thread` from blocking. In particular, this prevents the background thread from being joined automatically when the process exits -- see :meth:`join_thread`. .. class:: SimpleQueue() It is a simplified :class:`Queue` type, very close to a locked :class:`Pipe`. .. method:: empty() Return ``True`` if the queue is empty, ``False`` otherwise. .. method:: get() Remove and return an item from the queue. .. method:: put(item) Put *item* into the queue. .. class:: JoinableQueue([maxsize]) :class:`JoinableQueue`, a :class:`Queue` subclass, is a queue which additionally has :meth:`task_done` and :meth:`join` methods. .. method:: task_done() Indicate that a formerly enqueued task is complete. Used by queue consumer threads. For each :meth:`~Queue.get` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue that the processing on the task is complete. If a :meth:`~Queue.join` is currently blocking, it will resume when all items have been processed (meaning that a :meth:`task_done` call was received for every item that had been :meth:`~Queue.put` into the queue). Raises a :exc:`ValueError` if called more times than there were items placed in the queue. .. method:: join() Block until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls :meth:`task_done` to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, :meth:`~Queue.join` unblocks. Miscellaneous ~~~~~~~~~~~~~ .. function:: active_children() Return list of all live children of the current process. Calling this has the side effect of "joining" any processes which have already finished. .. function:: cpu_count() Return the number of CPUs in the system. May raise :exc:`NotImplementedError`. .. function:: current_process() Return the :class:`Process` object corresponding to the current process. An analogue of :func:`threading.current_thread`. .. function:: freeze_support() Add support for when a program which uses :mod:`multiprocessing` has been frozen to produce a Windows executable. (Has been tested with **py2exe**, **PyInstaller** and **cx_Freeze**.) One needs to call this function straight after the ``if __name__ == '__main__'`` line of the main module. For example:: from multiprocessing import Process, freeze_support def f(): print('hello world!') if __name__ == '__main__': freeze_support() Process(target=f).start() If the ``freeze_support()`` line is omitted then trying to run the frozen executable will raise :exc:`RuntimeError`. If the module is being run normally by the Python interpreter then :func:`freeze_support` has no effect. .. function:: set_executable() Sets the path of the Python interpreter to use when starting a child process. (By default :data:`sys.executable` is used). Embedders will probably need to do something like :: set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe')) before they can create child processes. (Windows only) .. note:: :mod:`multiprocessing` contains no analogues of :func:`threading.active_count`, :func:`threading.enumerate`, :func:`threading.settrace`, :func:`threading.setprofile`, :class:`threading.Timer`, or :class:`threading.local`. Connection Objects ~~~~~~~~~~~~~~~~~~ Connection objects allow the sending and receiving of picklable objects or strings. They can be thought of as message oriented connected sockets.
Connection objects are usually created using :func:`Pipe` -- see also :ref:`multiprocessing-listeners-clients`. .. class:: Connection .. method:: send(obj) Send an object to the other end of the connection which should be read using :meth:`recv`. The object must be picklable. Very large pickles (approximately 32 MB+, though it depends on the OS) may raise a :exc:`ValueError` exception. .. method:: recv() Return an object sent from the other end of the connection using :meth:`send`. Blocks until there is something to receive. Raises :exc:`EOFError` if there is nothing left to receive and the other end was closed. .. method:: fileno() Return the file descriptor or handle used by the connection. .. method:: close() Close the connection. This is called automatically when the connection is garbage collected. .. method:: poll([timeout]) Return whether there is any data available to be read. If *timeout* is not specified then it will return immediately. If *timeout* is a number then this specifies the maximum time in seconds to block. If *timeout* is ``None`` then an infinite timeout is used. Note that multiple connection objects may be polled at once by using :func:`multiprocessing.connection.wait`. .. method:: send_bytes(buffer[, offset[, size]]) Send byte data from an object supporting the buffer interface as a complete message. If *offset* is given then data is read from that position in *buffer*. If *size* is given then that many bytes will be read from *buffer*. Very large buffers (approximately 32 MB+, though it depends on the OS) may raise a :exc:`ValueError` exception. .. method:: recv_bytes([maxlength]) Return a complete message of byte data sent from the other end of the connection as a string. Blocks until there is something to receive. Raises :exc:`EOFError` if there is nothing left to receive and the other end has closed. If *maxlength* is specified and the message is longer than *maxlength* then :exc:`OSError` is raised and the connection will no longer be readable. .. versionchanged:: 3.3 This function used to raise an :exc:`IOError`, which is now an alias of :exc:`OSError`. .. method:: recv_bytes_into(buffer[, offset]) Read into *buffer* a complete message of byte data sent from the other end of the connection and return the number of bytes in the message. Blocks until there is something to receive. Raises :exc:`EOFError` if there is nothing left to receive and the other end was closed. *buffer* must be an object satisfying the writable buffer interface. If *offset* is given then the message will be written into the buffer from that position. Offset must be a non-negative integer less than the length of *buffer* (in bytes). If the buffer is too short then a :exc:`BufferTooShort` exception is raised and the complete message is available as ``e.args[0]`` where ``e`` is the exception instance. For example: .. doctest:: >>> from multiprocessing import Pipe >>> a, b = Pipe() >>> a.send([1, 'hello', None]) >>> b.recv() [1, 'hello', None] >>> b.send_bytes(b'thank you') >>> a.recv_bytes() b'thank you' >>> import array >>> arr1 = array.array('i', range(5)) >>> arr2 = array.array('i', [0] * 10) >>> a.send_bytes(arr1) >>> count = b.recv_bytes_into(arr2) >>> assert count == len(arr1) * arr1.itemsize >>> arr2 array('i', [0, 1, 2, 3, 4, 0, 0, 0, 0, 0]) .. warning:: The :meth:`Connection.recv` method automatically unpickles the data it receives, which can be a security risk unless you can trust the process which sent the message.
Therefore, unless the connection object was produced using :func:`Pipe` you should only use the :meth:`~Connection.recv` and :meth:`~Connection.send` methods after performing some sort of authentication. See :ref:`multiprocessing-auth-keys`. .. warning:: If a process is killed while it is trying to read or write to a pipe then the data in the pipe is likely to become corrupted, because it may become impossible to be sure where the message boundaries lie. Synchronization primitives ~~~~~~~~~~~~~~~~~~~~~~~~~~ Generally synchronization primitives are not as necessary in a multiprocess program as they are in a multithreaded program. See the documentation for the :mod:`threading` module. Note that one can also create synchronization primitives by using a manager object -- see :ref:`multiprocessing-managers`. .. class:: BoundedSemaphore([value]) A bounded semaphore object: a clone of :class:`threading.BoundedSemaphore`. (On macOS, this is indistinguishable from :class:`Semaphore` because ``sem_getvalue()`` is not implemented on that platform). .. class:: Condition([lock]) A condition variable: a clone of :class:`threading.Condition`. If *lock* is specified then it should be a :class:`Lock` or :class:`RLock` object from :mod:`multiprocessing`. .. versionchanged:: 3.3 The :meth:`wait_for` method was added. .. class:: Event() A clone of :class:`threading.Event`. Its :meth:`wait` method returns the state of the internal semaphore on exit, so it will always return ``True`` except if a timeout is given and the operation times out. .. versionchanged:: 3.1 Previously, the method always returned ``None``. .. class:: Lock() A non-recursive lock object: a clone of :class:`threading.Lock`. .. class:: RLock() A recursive lock object: a clone of :class:`threading.RLock`. .. class:: Semaphore([value]) A semaphore object: a clone of :class:`threading.Semaphore`. .. note:: On macOS, ``sem_timedwait`` is unsupported, so calling ``acquire()`` with a timeout will emulate that function's behavior using a sleeping loop. .. note:: If the SIGINT signal generated by Ctrl-C arrives while the main thread is blocked by a call to :meth:`BoundedSemaphore.acquire`, :meth:`Lock.acquire`, :meth:`RLock.acquire`, :meth:`Semaphore.acquire`, :meth:`Condition.acquire` or :meth:`Condition.wait` then the call will be immediately interrupted and :exc:`KeyboardInterrupt` will be raised. This differs from the behaviour of :mod:`threading` where SIGINT will be ignored while the equivalent blocking calls are in progress. Shared :mod:`ctypes` Objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is possible to create shared objects using shared memory which can be inherited by child processes. .. function:: Value(typecode_or_type, *args[, lock]) Return a :mod:`ctypes` object allocated from shared memory. By default the return value is actually a synchronized wrapper for the object. *typecode_or_type* determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. *\*args* is passed on to the constructor for the type. If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword-only argument. ..
function:: Array(typecode_or_type, size_or_initializer, *, lock=True) Return a ctypes array allocated from shared memory. By default the return value is actually a synchronized wrapper for the array. *typecode_or_type* determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. If *size_or_initializer* is an integer, then it determines the length of the array, and the array will be initially zeroed. Otherwise, *size_or_initializer* is a sequence which is used to initialize the array and whose length determines the length of the array. If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword only argument. Note that an array of :data:`ctypes.c_char` has *value* and *raw* attributes which allow one to use it to store and retrieve strings. The :mod:`multiprocessing.sharedctypes` module >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> .. module:: multiprocessing.sharedctypes :synopsis: Allocate ctypes objects from shared memory. The :mod:`multiprocessing.sharedctypes` module provides functions for allocating :mod:`ctypes` objects from shared memory which can be inherited by child processes. .. note:: Although it is possible to store a pointer in shared memory remember that this will refer to a location in the address space of a specific process. However, the pointer is quite likely to be invalid in the context of a second process and trying to dereference the pointer from the second process may cause a crash. .. function:: RawArray(typecode_or_type, size_or_initializer) Return a ctypes array allocated from shared memory. *typecode_or_type* determines the type of the elements of the returned array: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. If *size_or_initializer* is an integer then it determines the length of the array, and the array will be initially zeroed. Otherwise *size_or_initializer* is a sequence which is used to initialize the array and whose length determines the length of the array. Note that setting and getting an element is potentially non-atomic -- use :func:`Array` instead to make sure that access is automatically synchronized using a lock. .. function:: RawValue(typecode_or_type, *args) Return a ctypes object allocated from shared memory. *typecode_or_type* determines the type of the returned object: it is either a ctypes type or a one character typecode of the kind used by the :mod:`array` module. *\*args* is passed on to the constructor for the type. Note that setting and getting the value is potentially non-atomic -- use :func:`Value` instead to make sure that access is automatically synchronized using a lock. Note that an array of :data:`ctypes.c_char` has ``value`` and ``raw`` attributes which allow one to use it to store and retrieve strings -- see documentation for :mod:`ctypes`. .. function:: Array(typecode_or_type, size_or_initializer, *args[, lock]) The same as :func:`RawArray` except that depending on the value of *lock* a process-safe synchronization wrapper may be returned instead of a raw ctypes array. 
If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword-only argument. .. function:: Value(typecode_or_type, *args[, lock]) The same as :func:`RawValue` except that depending on the value of *lock* a process-safe synchronization wrapper may be returned instead of a raw ctypes object. If *lock* is ``True`` (the default) then a new lock object is created to synchronize access to the value. If *lock* is a :class:`Lock` or :class:`RLock` object then that will be used to synchronize access to the value. If *lock* is ``False`` then access to the returned object will not be automatically protected by a lock, so it will not necessarily be "process-safe". Note that *lock* is a keyword-only argument. .. function:: copy(obj) Return a ctypes object allocated from shared memory which is a copy of the ctypes object *obj*. .. function:: synchronized(obj[, lock]) Return a process-safe wrapper object for a ctypes object which uses *lock* to synchronize access. If *lock* is ``None`` (the default) then a :class:`multiprocessing.RLock` object is created automatically. A synchronized wrapper will have two methods in addition to those of the object it wraps: :meth:`get_obj` returns the wrapped object and :meth:`get_lock` returns the lock object used for synchronization. Note that accessing the ctypes object through the wrapper can be a lot slower than accessing the raw ctypes object. The table below compares the syntax for creating shared ctypes objects from shared memory with the normal ctypes syntax. (In the table ``MyStruct`` is some subclass of :class:`ctypes.Structure`.) ==================== ========================== =========================== ctypes sharedctypes using type sharedctypes using typecode ==================== ========================== =========================== c_double(2.4) RawValue(c_double, 2.4) RawValue('d', 2.4) MyStruct(4, 6) RawValue(MyStruct, 4, 6) (c_short * 7)() RawArray(c_short, 7) RawArray('h', 7) (c_int * 3)(9, 2, 8) RawArray(c_int, (9, 2, 8)) RawArray('i', (9, 2, 8)) ==================== ========================== =========================== Below is an example where a number of ctypes objects are modified by a child process:: from multiprocessing import Process, Lock from multiprocessing.sharedctypes import Value, Array from ctypes import Structure, c_double class Point(Structure): _fields_ = [('x', c_double), ('y', c_double)] def modify(n, x, s, A): n.value **= 2 x.value **= 2 s.value = s.value.upper() for a in A: a.x **= 2 a.y **= 2 if __name__ == '__main__': lock = Lock() n = Value('i', 7) x = Value(c_double, 1.0/3.0, lock=False) s = Array('c', 'hello world', lock=lock) A = Array(Point, [(1.875,-6.25), (-5.75,2.0), (2.375,9.5)], lock=lock) p = Process(target=modify, args=(n, x, s, A)) p.start() p.join() print(n.value) print(x.value) print(s.value) print([(a.x, a.y) for a in A]) .. highlight:: none The results printed are :: 49 0.1111111111111111 HELLO WORLD [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)] .. highlight:: python .. _multiprocessing-managers: Managers ~~~~~~~~ Managers provide a way to create data which can be shared between different processes. 
A manager object controls a server process which manages *shared objects*. Other processes can access the shared objects by using proxies. .. function:: multiprocessing.Manager() Returns a started :class:`~multiprocessing.managers.SyncManager` object which can be used for sharing objects between processes. The returned manager object corresponds to a spawned child process and has methods which will create shared objects and return corresponding proxies. .. module:: multiprocessing.managers :synopsis: Share data between processes with shared objects. Manager processes will be shut down as soon as they are garbage collected or their parent process exits. The manager classes are defined in the :mod:`multiprocessing.managers` module: .. class:: BaseManager([address[, authkey]]) Create a BaseManager object. Once created one should call :meth:`start` or ``get_server().serve_forever()`` to ensure that the manager object refers to a started manager process. *address* is the address on which the manager process listens for new connections. If *address* is ``None`` then an arbitrary one is chosen. *authkey* is the authentication key which will be used to check the validity of incoming connections to the server process. If *authkey* is ``None`` then ``current_process().authkey`` is used. Otherwise *authkey* is used and it must be a string. .. method:: start([initializer[, initargs]]) Start a subprocess to start the manager. If *initializer* is not ``None`` then the subprocess will call ``initializer(*initargs)`` when it starts. .. method:: get_server() Returns a :class:`Server` object which represents the actual server under the control of the Manager. The :class:`Server` object supports the :meth:`serve_forever` method:: >>> from multiprocessing.managers import BaseManager >>> manager = BaseManager(address=('', 50000), authkey='abc') >>> server = manager.get_server() >>> server.serve_forever() :class:`Server` additionally has an :attr:`address` attribute. .. method:: connect() Connect a local manager object to a remote manager process:: >>> from multiprocessing.managers import BaseManager >>> m = BaseManager(address=('127.0.0.1', 5000), authkey='abc') >>> m.connect() .. method:: shutdown() Stop the process used by the manager. This is only available if :meth:`start` has been used to start the server process. This can be called multiple times. .. method:: register(typeid[, callable[, proxytype[, exposed[, method_to_typeid[, create_method]]]]]) A classmethod which can be used for registering a type or callable with the manager class. *typeid* is a "type identifier" which is used to identify a particular type of shared object. This must be a string. *callable* is a callable used for creating objects for this type identifier. If a manager instance will be created using the :meth:`from_address` classmethod or if the *create_method* argument is ``False`` then this can be left as ``None``. *proxytype* is a subclass of :class:`BaseProxy` which is used to create proxies for shared objects with this *typeid*. If ``None`` then a proxy class is created automatically. *exposed* is used to specify a sequence of method names which proxies for this typeid should be allowed to access using :meth:`BaseProxy._callmethod`. (If *exposed* is ``None`` then :attr:`proxytype._exposed_` is used instead if it exists.) In the case where no exposed list is specified, all "public methods" of the shared object will be accessible.
(Here a "public method" means any attribute which has a :meth:`__call__` method and whose name does not begin with ``'_'``.) *method_to_typeid* is a mapping used to specify the return type of those exposed methods which should return a proxy. It maps method names to typeid strings. (If *method_to_typeid* is ``None`` then :attr:`proxytype._method_to_typeid_` is used instead if it exists.) If a method's name is not a key of this mapping or if the mapping is ``None`` then the object returned by the method will be copied by value. *create_method* determines whether a method should be created with name *typeid* which can be used to tell the server process to create a new shared object and return a proxy for it. By default it is ``True``. :class:`BaseManager` instances also have one read-only property: .. attribute:: address The address used by the manager. .. class:: SyncManager A subclass of :class:`BaseManager` which can be used for the synchronization of processes. Objects of this type are returned by :func:`multiprocessing.Manager`. It also supports creation of shared lists and dictionaries. .. method:: BoundedSemaphore([value]) Create a shared :class:`threading.BoundedSemaphore` object and return a proxy for it. .. method:: Condition([lock]) Create a shared :class:`threading.Condition` object and return a proxy for it. If *lock* is supplied then it should be a proxy for a :class:`threading.Lock` or :class:`threading.RLock` object. .. versionchanged:: 3.3 The :meth:`wait_for` method was added. .. method:: Event() Create a shared :class:`threading.Event` object and return a proxy for it. .. method:: Lock() Create a shared :class:`threading.Lock` object and return a proxy for it. .. method:: Namespace() Create a shared :class:`Namespace` object and return a proxy for it. .. method:: Queue([maxsize]) Create a shared :class:`Queue.Queue` object and return a proxy for it. .. method:: RLock() Create a shared :class:`threading.RLock` object and return a proxy for it. .. method:: Semaphore([value]) Create a shared :class:`threading.Semaphore` object and return a proxy for it. .. method:: Array(typecode, sequence) Create an array and return a proxy for it. .. method:: Value(typecode, value) Create an object with a writable ``value`` attribute and return a proxy for it. .. method:: dict() dict(mapping) dict(sequence) Create a shared ``dict`` object and return a proxy for it. .. method:: list() list(sequence) Create a shared ``list`` object and return a proxy for it. .. note:: Modifications to mutable values or items in dict and list proxies will not be propagated through the manager, because the proxy has no way of knowing when its values or items are modified. To modify such an item, you can re-assign the modified object to the container proxy:: # create a list proxy and append a mutable object (a dictionary) lproxy = manager.list() lproxy.append({}) # now mutate the dictionary d = lproxy[0] d['a'] = 1 d['b'] = 2 # at this point, the changes to d are not yet synced, but by # reassigning the dictionary, the proxy is notified of the change lproxy[0] = d Namespace objects >>>>>>>>>>>>>>>>> A namespace object has no public methods, but does have writable attributes. Its representation shows the values of its attributes. However, when using a proxy for a namespace object, an attribute beginning with ``'_'`` will be an attribute of the proxy and not an attribute of the referent: .. 
doctest:: >>> manager = multiprocessing.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' >>> Global._z = 12.3 # this is an attribute of the proxy >>> print(Global) Namespace(x=10, y='hello') Customized managers >>>>>>>>>>>>>>>>>>> To create one's own manager, one creates a subclass of :class:`BaseManager` and uses the :meth:`~BaseManager.register` classmethod to register new types or callables with the manager class. For example:: from multiprocessing.managers import BaseManager class MathsClass: def add(self, x, y): return x + y def mul(self, x, y): return x * y class MyManager(BaseManager): pass MyManager.register('Maths', MathsClass) if __name__ == '__main__': manager = MyManager() manager.start() maths = manager.Maths() print(maths.add(4, 3)) # prints 7 print(maths.mul(7, 8)) # prints 56 Using a remote manager >>>>>>>>>>>>>>>>>>>>>> It is possible to run a manager server on one machine and have clients use it from other machines (assuming that the firewalls involved allow it). Running the following commands creates a server for a single shared queue which remote clients can access:: >>> from multiprocessing.managers import BaseManager >>> import queue >>> queue = queue.Queue() >>> class QueueManager(BaseManager): pass >>> QueueManager.register('get_queue', callable=lambda:queue) >>> m = QueueManager(address=('', 50000), authkey='abracadabra') >>> s = m.get_server() >>> s.serve_forever() One client can access the server as follows:: >>> from multiprocessing.managers import BaseManager >>> class QueueManager(BaseManager): pass >>> QueueManager.register('get_queue') >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='abracadabra') >>> m.connect() >>> queue = m.get_queue() >>> queue.put('hello') Another client can also use it:: >>> from multiprocessing.managers import BaseManager >>> class QueueManager(BaseManager): pass >>> QueueManager.register('get_queue') >>> m = QueueManager(address=('foo.bar.org', 50000), authkey='abracadabra') >>> m.connect() >>> queue = m.get_queue() >>> queue.get() 'hello' Local processes can also access that queue, using the code from above on the client to access it remotely:: >>> from multiprocessing import Process, Queue >>> from multiprocessing.managers import BaseManager >>> class Worker(Process): ... def __init__(self, q): ... self.q = q ... super(Worker, self).__init__() ... def run(self): ... self.q.put('local hello') ... >>> queue = Queue() >>> w = Worker(queue) >>> w.start() >>> class QueueManager(BaseManager): pass ... >>> QueueManager.register('get_queue', callable=lambda: queue) >>> m = QueueManager(address=('', 50000), authkey='abracadabra') >>> s = m.get_server() >>> s.serve_forever() Proxy Objects ~~~~~~~~~~~~~ A proxy is an object which *refers* to a shared object which lives (presumably) in a different process. The shared object is said to be the *referent* of the proxy. Multiple proxy objects may have the same referent. A proxy object has methods which invoke corresponding methods of its referent (although not every method of the referent will necessarily be available through the proxy). A proxy can usually be used in most of the same ways that its referent can: ..
doctest:: >>> from multiprocessing import Manager >>> manager = Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print(l) [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] >>> print(repr(l)) >>> l[4] 16 >>> l[2:5] [4, 9, 16] Notice that applying :func:`str` to a proxy will return the representation of the referent, whereas applying :func:`repr` will return the representation of the proxy. An important feature of proxy objects is that they are picklable so they can be passed between processes. Note, however, that if a proxy is sent to the corresponding manager's process then unpickling it will produce the referent itself. This means, for example, that one shared object can contain a second: .. doctest:: >>> a = manager.list() >>> b = manager.list() >>> a.append(b) # referent of a now contains referent of b >>> print(a, b) [[]] [] >>> b.append('hello') >>> print(a, b) [['hello']] ['hello'] .. note:: The proxy types in :mod:`multiprocessing` do nothing to support comparisons by value. So, for instance, we have: .. doctest:: >>> manager.list([1,2,3]) == [1,2,3] False One should just use a copy of the referent instead when making comparisons. .. class:: BaseProxy Proxy objects are instances of subclasses of :class:`BaseProxy`. .. method:: _callmethod(methodname[, args[, kwds]]) Call and return the result of a method of the proxy's referent. If ``proxy`` is a proxy whose referent is ``obj`` then the expression :: proxy._callmethod(methodname, args, kwds) will evaluate the expression :: getattr(obj, methodname)(*args, **kwds) in the manager's process. The returned value will be a copy of the result of the call or a proxy to a new shared object -- see documentation for the *method_to_typeid* argument of :meth:`BaseManager.register`. If an exception is raised by the call, then it is re-raised by :meth:`_callmethod`. If some other exception is raised in the manager's process then this is converted into a :exc:`RemoteError` exception and is raised by :meth:`_callmethod`. Note in particular that an exception will be raised if *methodname* has not been *exposed*. An example of the usage of :meth:`_callmethod`: .. doctest:: >>> l = manager.list(range(10)) >>> l._callmethod('__len__') 10 >>> l._callmethod('__getslice__', (2, 7)) # equiv to `l[2:7]` [2, 3, 4, 5, 6] >>> l._callmethod('__getitem__', (20,)) # equiv to `l[20]` Traceback (most recent call last): ... IndexError: list index out of range .. method:: _getvalue() Return a copy of the referent. If the referent is unpicklable then this will raise an exception. .. method:: __repr__ Return a representation of the proxy object. .. method:: __str__ Return the representation of the referent. Cleanup >>>>>>> A proxy object uses a weakref callback so that when it gets garbage collected it deregisters itself from the manager which owns its referent. A shared object gets deleted from the manager process when there are no longer any proxies referring to it. Process Pools ~~~~~~~~~~~~~ .. module:: multiprocessing.pool :synopsis: Create pools of processes. One can create a pool of processes which will carry out tasks submitted to it with the :class:`Pool` class. .. class:: multiprocessing.Pool([processes[, initializer[, initargs[, maxtasksperchild]]]]) A process pool object which controls a pool of worker processes to which jobs can be submitted. It supports asynchronous results with timeouts and callbacks and has a parallel map implementation. *processes* is the number of worker processes to use.
If *processes* is ``None`` then the number returned by :func:`cpu_count` is used. If *initializer* is not ``None`` then each worker process will call ``initializer(*initargs)`` when it starts. .. versionadded:: 3.2 *maxtasksperchild* is the number of tasks a worker process can complete before it will exit and be replaced with a fresh worker process, to enable unused resources to be freed. The default *maxtasksperchild* is ``None``, which means worker processes will live as long as the pool. .. note:: Worker processes within a :class:`Pool` typically live for the complete duration of the Pool's work queue. A frequent pattern found in other systems (such as Apache, mod_wsgi, etc.) to free resources held by workers is to allow a worker within a pool to complete only a set amount of work before exiting, being cleaned up and a new process spawned to replace the old one. The *maxtasksperchild* argument to the :class:`Pool` exposes this ability to the end user. .. method:: apply(func[, args[, kwds]]) Call *func* with arguments *args* and keyword arguments *kwds*. It blocks until the result is ready. Given this blocks, :meth:`apply_async` is better suited for performing work in parallel. Additionally, *func* is only executed in one of the workers of the pool. .. method:: apply_async(func[, args[, kwds[, callback[, error_callback]]]]) A variant of the :meth:`apply` method which returns a result object. If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then the *error_callback* is called with the exception instance. Callbacks should complete immediately since otherwise the thread which handles the results will get blocked. .. method:: map(func, iterable[, chunksize]) A parallel equivalent of the :func:`map` built-in function (it supports only one *iterable* argument though). It blocks until the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting *chunksize* to a positive integer. .. method:: map_async(func, iterable[, chunksize[, callback[, error_callback]]]) A variant of the :meth:`.map` method which returns a result object. If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then the *error_callback* is called with the exception instance. Callbacks should complete immediately since otherwise the thread which handles the results will get blocked. .. method:: imap(func, iterable[, chunksize]) A lazier version of :meth:`map`. The *chunksize* argument is the same as the one used by the :meth:`.map` method. For very long iterables using a large value for *chunksize* can make the job complete **much** faster than using the default value of ``1``.
Also if *chunksize* is ``1`` then the :meth:`!next` method of the iterator returned by the :meth:`imap` method has an optional *timeout* parameter: ``next(timeout)`` will raise :exc:`multiprocessing.TimeoutError` if the result cannot be returned within *timeout* seconds. .. method:: imap_unordered(func, iterable[, chunksize]) The same as :meth:`imap` except that the ordering of the results from the returned iterator should be considered arbitrary. (Only when there is only one worker process is the order guaranteed to be "correct".) .. method:: starmap(func, iterable[, chunksize]) Like :meth:`map` except that the elements of *iterable* are expected to be iterables that are unpacked as arguments. Hence an *iterable* of ``[(1,2), (3, 4)]`` results in ``[func(1,2), func(3,4)]``. .. versionadded:: 3.3 .. method:: starmap_async(func, iterable[, chunksize[, callback[, error_callback]]]) A combination of :meth:`starmap` and :meth:`map_async` that iterates over *iterable* of iterables and calls *func* with the iterables unpacked. Returns a result object. .. versionadded:: 3.3 .. method:: close() Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes will exit. .. method:: terminate() Stops the worker processes immediately without completing outstanding work. When the pool object is garbage collected :meth:`terminate` will be called immediately. .. method:: join() Wait for the worker processes to exit. One must call :meth:`close` or :meth:`terminate` before using :meth:`join`. .. class:: AsyncResult The class of the result returned by :meth:`Pool.apply_async` and :meth:`Pool.map_async`. .. method:: get([timeout]) Return the result when it arrives. If *timeout* is not ``None`` and the result does not arrive within *timeout* seconds then :exc:`multiprocessing.TimeoutError` is raised. If the remote call raised an exception then that exception will be reraised by :meth:`get`. .. method:: wait([timeout]) Wait until the result is available or until *timeout* seconds pass. .. method:: ready() Return whether the call has completed. .. method:: successful() Return whether the call completed without raising an exception. Will raise :exc:`AssertionError` if the result is not ready. The following example demonstrates the use of a pool:: from multiprocessing import Pool def f(x): return x*x if __name__ == '__main__': pool = Pool(processes=4) # start 4 worker processes result = pool.apply_async(f, (10,)) # evaluate "f(10)" asynchronously print(result.get(timeout=1)) # prints "100" unless your computer is *very* slow print(pool.map(f, range(10))) # prints "[0, 1, 4,..., 81]" it = pool.imap(f, range(10)) print(next(it)) # prints "0" print(next(it)) # prints "1" print(it.next(timeout=1)) # prints "4" unless your computer is *very* slow import time result = pool.apply_async(time.sleep, (10,)) print(result.get(timeout=1)) # raises TimeoutError .. _multiprocessing-listeners-clients: Listeners and Clients ~~~~~~~~~~~~~~~~~~~~~ .. module:: multiprocessing.connection :synopsis: API for dealing with sockets. Usually message passing between processes is done using queues or by using :class:`Connection` objects returned by :func:`Pipe`. However, the :mod:`multiprocessing.connection` module allows some extra flexibility. It basically gives a high level message oriented API for dealing with sockets or Windows named pipes. It also has support for *digest authentication* using the :mod:`hmac` module, and for polling multiple connections at the same time. ..
function:: deliver_challenge(connection, authkey) Send a randomly generated message to the other end of the connection and wait for a reply. If the reply matches the digest of the message using *authkey* as the key then a welcome message is sent to the other end of the connection. Otherwise :exc:`AuthenticationError` is raised. .. function:: answer_challenge(connection, authkey) Receive a message, calculate the digest of the message using *authkey* as the key, and then send the digest back. If a welcome message is not received, then :exc:`AuthenticationError` is raised. .. function:: Client(address[, family[, authenticate[, authkey]]]) Attempt to set up a connection to the listener which is using address *address*, returning a :class:`~multiprocessing.Connection`. The type of the connection is determined by the *family* argument, but this can generally be omitted since it can usually be inferred from the format of *address*. (See :ref:`multiprocessing-address-formats`) If *authenticate* is ``True`` or *authkey* is a string then digest authentication is used. The key used for authentication will be either *authkey* or ``current_process().authkey`` if *authkey* is ``None``. If authentication fails then :exc:`AuthenticationError` is raised. See :ref:`multiprocessing-auth-keys`. .. class:: Listener([address[, family[, backlog[, authenticate[, authkey]]]]]) A wrapper for a bound socket or Windows named pipe which is 'listening' for connections. *address* is the address to be used by the bound socket or named pipe of the listener object. .. note:: If an address of '0.0.0.0' is used, the address will not be a connectable end point on Windows. If you require a connectable end-point, you should use '127.0.0.1'. *family* is the type of socket (or named pipe) to use. This can be one of the strings ``'AF_INET'`` (for a TCP socket), ``'AF_UNIX'`` (for a Unix domain socket) or ``'AF_PIPE'`` (for a Windows named pipe). Of these only the first is guaranteed to be available. If *family* is ``None`` then the family is inferred from the format of *address*. If *address* is also ``None`` then a default is chosen. This default is the family which is assumed to be the fastest available. See :ref:`multiprocessing-address-formats`. Note that if *family* is ``'AF_UNIX'`` and address is ``None`` then the socket will be created in a private temporary directory created using :func:`tempfile.mkstemp`. If the listener object uses a socket then *backlog* (1 by default) is passed to the :meth:`listen` method of the socket once it has been bound. If *authenticate* is ``True`` (``False`` by default) or *authkey* is not ``None`` then digest authentication is used. If *authkey* is a string then it will be used as the authentication key; otherwise it must be ``None``. If *authkey* is ``None`` and *authenticate* is ``True`` then ``current_process().authkey`` is used as the authentication key. If *authkey* is ``None`` and *authenticate* is ``False`` then no authentication is done. If authentication fails then :exc:`AuthenticationError` is raised. See :ref:`multiprocessing-auth-keys`. .. method:: accept() Accept a connection on the bound socket or named pipe of the listener object and return a :class:`Connection` object. If authentication is attempted and fails, then :exc:`AuthenticationError` is raised. .. method:: close() Close the bound socket or named pipe of the listener object. This is called automatically when the listener is garbage collected. However it is advisable to call it explicitly.
Listener objects have the following read-only properties: .. attribute:: address The address which is being used by the Listener object. .. attribute:: last_accepted The address from which the last accepted connection came. If this is unavailable then it is ``None``. .. function:: wait(object_list, timeout=None) Wait until an object in *object_list* is ready. Returns the list of those objects in *object_list* which are ready. If *timeout* is a float then the call blocks for at most that many seconds. If *timeout* is ``None`` then it will block for an unlimited period. For both Unix and Windows, an object can appear in *object_list* if it is * a readable :class:`~multiprocessing.Connection` object; * a connected and readable :class:`socket.socket` object; or * the :attr:`~multiprocessing.Process.sentinel` attribute of a :class:`~multiprocessing.Process` object. A connection or socket object is ready when there is data available to be read from it, or the other end has been closed. **Unix**: ``wait(object_list, timeout)`` is almost equivalent to ``select.select(object_list, [], [], timeout)``. The difference is that, if :func:`select.select` is interrupted by a signal, it can raise :exc:`OSError` with an error number of ``EINTR``, whereas :func:`wait` will not. **Windows**: An item in *object_list* must either be an integer handle which is waitable (according to the definition used by the documentation of the Win32 function ``WaitForMultipleObjects()``) or it can be an object with a :meth:`fileno` method which returns a socket handle or pipe handle. (Note that pipe handles and socket handles are **not** waitable handles.) .. versionadded:: 3.3 The module defines the following exception: .. exception:: AuthenticationError Exception raised when there is an authentication error. **Examples** The following server code creates a listener which uses ``'secret password'`` as an authentication key. It then waits for a connection and sends some data to the client:: from multiprocessing.connection import Listener from array import array address = ('localhost', 6000) # family is deduced to be 'AF_INET' listener = Listener(address, authkey=b'secret password') conn = listener.accept() print('connection accepted from', listener.last_accepted) conn.send([2.25, None, 'junk', float]) conn.send_bytes(b'hello') conn.send_bytes(array('i', [42, 1729])) conn.close() listener.close() The following code connects to the server and receives some data from the server:: from multiprocessing.connection import Client from array import array address = ('localhost', 6000) conn = Client(address, authkey=b'secret password') print(conn.recv()) # => [2.25, None, 'junk', float] print(conn.recv_bytes()) # => 'hello' arr = array('i', [0, 0, 0, 0, 0]) print(conn.recv_bytes_into(arr)) # => 8 print(arr) # => array('i', [42, 1729, 0, 0, 0]) conn.close() The following code uses :func:`~multiprocessing.connection.wait` to wait for messages from multiple processes at once:: import time, random from multiprocessing import Process, Pipe, current_process from multiprocessing.connection import wait def foo(w): for i in range(10): w.send((i, current_process().name)) w.close() if __name__ == '__main__': readers = [] for i in range(4): r, w = Pipe(duplex=False) readers.append(r) p = Process(target=foo, args=(w,)) p.start() # We close the writable end of the pipe now to be sure that # p is the only process which owns a handle for it.
.. _multiprocessing-address-formats:

Address Formats
>>>>>>>>>>>>>>>

* An ``'AF_INET'`` address is a tuple of the form ``(hostname, port)`` where
  *hostname* is a string and *port* is an integer.

* An ``'AF_UNIX'`` address is a string representing a filename on the
  filesystem.

* An ``'AF_PIPE'`` address is a string of the form
  :samp:`r'\\\\.\\pipe\\{PipeName}'`.  To use :func:`Client` to connect to a
  named pipe on a remote computer called *ServerName* one should use an
  address of the form :samp:`r'\\\\{ServerName}\\pipe\\{PipeName}'` instead.

Note that any string beginning with two backslashes is assumed by default to be
an ``'AF_PIPE'`` address rather than an ``'AF_UNIX'`` address.

.. _multiprocessing-auth-keys:

Authentication keys
~~~~~~~~~~~~~~~~~~~

When one uses :meth:`Connection.recv`, the data received is automatically
unpickled.  Unfortunately unpickling data from an untrusted source is a
security risk.  Therefore :class:`Listener` and :func:`Client` use the
:mod:`hmac` module to provide digest authentication.

An authentication key is a string which can be thought of as a password: once
a connection is established both ends will demand proof that the other knows
the authentication key.  (Demonstrating that both ends are using the same key
does **not** involve sending the key over the connection.)

If authentication is requested but no authentication key is specified then the
return value of ``current_process().authkey`` is used (see
:class:`~multiprocessing.Process`).  This value will be automatically inherited
by any :class:`~multiprocessing.Process` object that the current process
creates.  This means that (by default) all processes of a multi-process program
will share a single authentication key which can be used when setting up
connections between themselves.

Suitable authentication keys can also be generated by using :func:`os.urandom`.

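For example, a parent process can install an explicit key before starting
children and rely on it being inherited.  A minimal sketch (the ``child``
function and the key size are illustrative only)::

    import os
    from multiprocessing import Process, current_process

    def child():
        # The parent's key was inherited, so both sides can authenticate
        # against each other without the key ever crossing a connection.
        print('child key length:', len(current_process().authkey))

    if __name__ == '__main__':
        # Replace the default (random) key with one generated explicitly.
        current_process().authkey = os.urandom(32)
        p = Process(target=child)
        p.start()
        p.join()
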
Logging
~~~~~~~

Some support for logging is available.  Note, however, that the :mod:`logging`
package does not use process shared locks so it is possible (depending on the
handler type) for messages from different processes to get mixed up.

.. currentmodule:: multiprocessing
.. function:: get_logger()

   Returns the logger used by :mod:`multiprocessing`.  If necessary, a new one
   will be created.

   When first created the logger has level :data:`logging.NOTSET` and no
   default handler.  Messages sent to this logger will not by default propagate
   to the root logger.

   Note that on Windows child processes will only inherit the level of the
   parent process's logger -- any other customization of the logger will not be
   inherited.

.. currentmodule:: multiprocessing
.. function:: log_to_stderr()

   This function performs a call to :func:`get_logger` but in addition to
   returning the logger created by get_logger, it adds a handler which sends
   output to :data:`sys.stderr` using format
   ``'[%(levelname)s/%(processName)s] %(message)s'``.

Below is an example session with logging turned on::

    >>> import multiprocessing, logging
    >>> logger = multiprocessing.log_to_stderr()
    >>> logger.setLevel(logging.INFO)
    >>> logger.warning('doomed')
    [WARNING/MainProcess] doomed
    >>> m = multiprocessing.Manager()
    [INFO/SyncManager-...] child process calling self.run()
    [INFO/SyncManager-...] created temp directory /.../pymp-...
    [INFO/SyncManager-...] manager serving at '/.../listener-...'
    >>> del m
    [INFO/MainProcess] sending shutdown message to manager
    [INFO/SyncManager-...] manager exiting with exitcode 0

In addition to having these two logging functions, the :mod:`multiprocessing`
module also exposes two additional logging level attributes.  These are
:const:`SUBWARNING` and :const:`SUBDEBUG`.  The table below illustrates where
these fit in the normal level hierarchy.

+----------------+----------------+
| Level          | Numeric value  |
+================+================+
| ``SUBWARNING`` | 25             |
+----------------+----------------+
| ``SUBDEBUG``   | 5              |
+----------------+----------------+

For a full table of logging levels, see the :mod:`logging` module.

These additional logging levels are used primarily for certain debug messages
within the multiprocessing module.  Below is the same example as above, except
with :const:`SUBDEBUG` enabled::

    >>> import multiprocessing, logging
    >>> logger = multiprocessing.log_to_stderr()
    >>> logger.setLevel(multiprocessing.SUBDEBUG)
    >>> logger.warning('doomed')
    [WARNING/MainProcess] doomed
    >>> m = multiprocessing.Manager()
    [INFO/SyncManager-...] child process calling self.run()
    [INFO/SyncManager-...] created temp directory /.../pymp-...
    [INFO/SyncManager-...] manager serving at '/.../pymp-djGBXN/listener-...'
    >>> del m
    [SUBDEBUG/MainProcess] finalizer calling ...
    [INFO/MainProcess] sending shutdown message to manager
    [DEBUG/SyncManager-...] manager received shutdown message
    [SUBDEBUG/SyncManager-...] calling ...
    [SUBDEBUG/SyncManager-...] calling ...
    [SUBDEBUG/SyncManager-...] finalizer calling ...
    [INFO/SyncManager-...] manager exiting with exitcode 0

The :mod:`multiprocessing.dummy` module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. module:: multiprocessing.dummy
   :synopsis: Dumb wrapper around threading.

:mod:`multiprocessing.dummy` replicates the API of :mod:`multiprocessing` but
is no more than a wrapper around the :mod:`threading` module.

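Because the interface is the same, it can serve as a drop-in replacement when
threads are sufficient.  A minimal sketch (the ``square`` function is
illustrative only)::

    # Same Pool API, but backed by threads instead of processes.
    from multiprocessing.dummy import Pool

    def square(x):
        return x * x

    if __name__ == '__main__':
        pool = Pool(4)
        print(pool.map(square, range(10)))   # [0, 1, 4, 9, ...]
        pool.close()
        pool.join()
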
.. _multiprocessing-programming:

Programming guidelines
----------------------

There are certain guidelines and idioms which should be adhered to when using
:mod:`multiprocessing`.

All platforms
~~~~~~~~~~~~~

Avoid shared state

    As far as possible one should try to avoid shifting large amounts of data
    between processes.

    It is probably best to stick to using queues or pipes for communication
    between processes rather than using the lower level synchronization
    primitives from the :mod:`threading` module.

Picklability

    Ensure that the arguments to the methods of proxies are picklable.

Thread safety of proxies

    Do not use a proxy object from more than one thread unless you protect it
    with a lock.

    (There is never a problem with different processes using the *same* proxy.)

Joining zombie processes

    On Unix when a process finishes but has not been joined it becomes a
    zombie.  There should never be very many because each time a new process
    starts (or :func:`active_children` is called) all completed processes
    which have not yet been joined will be joined.  Also calling a finished
    process's :meth:`Process.is_alive` will join the process.  Even so it is
    probably good practice to explicitly join all the processes that you start.

Better to inherit than pickle/unpickle

    On Windows many types from :mod:`multiprocessing` need to be picklable so
    that child processes can use them.  However, one should generally avoid
    sending shared objects to other processes using pipes or queues.  Instead
    you should arrange the program so that a process which needs access to a
    shared resource created elsewhere can inherit it from an ancestor process.

Avoid terminating processes

    Using the :meth:`Process.terminate` method to stop a process is liable to
    cause any shared resources (such as locks, semaphores, pipes and queues)
    currently being used by the process to become broken or unavailable to
    other processes.

    Therefore it is probably best to only consider using
    :meth:`Process.terminate` on processes which never use any shared
    resources.

Joining processes that use queues

    Bear in mind that a process that has put items in a queue will wait before
    terminating until all the buffered items are fed by the "feeder" thread to
    the underlying pipe.  (The child process can call the
    :meth:`Queue.cancel_join_thread` method of the queue to avoid this
    behaviour.)

    This means that whenever you use a queue you need to make sure that all
    items which have been put on the queue will eventually be removed before
    the process is joined.  Otherwise you cannot be sure that processes which
    have put items on the queue will terminate.  Remember also that
    non-daemonic processes will be joined automatically.

    An example which will deadlock is the following::

        from multiprocessing import Process, Queue

        def f(q):
            q.put('X' * 1000000)

        if __name__ == '__main__':
            queue = Queue()
            p = Process(target=f, args=(queue,))
            p.start()
            p.join()                    # this deadlocks
            obj = queue.get()

    A fix here would be to swap the last two lines round (or simply remove the
    ``p.join()`` line), as sketched below.

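    For illustration, a minimal sketch of that corrected ordering (not part of
    the original example)::

        from multiprocessing import Process, Queue

        def f(q):
            q.put('X' * 1000000)

        if __name__ == '__main__':
            queue = Queue()
            p = Process(target=f, args=(queue,))
            p.start()
            obj = queue.get()           # drain the queue first ...
            p.join()                    # ... then joining cannot deadlock
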
Explicitly pass resources to child processes

    On Unix a child process can make use of a shared resource created in a
    parent process using a global resource.  However, it is better to pass the
    object as an argument to the constructor for the child process.

    Apart from making the code (potentially) compatible with Windows this also
    ensures that as long as the child process is still alive the object will
    not be garbage collected in the parent process.  This might be important
    if some resource is freed when the object is garbage collected in the
    parent process.

    So for instance ::

        from multiprocessing import Process, Lock

        def f():
            ... do something using "lock" ...

        if __name__ == '__main__':
            lock = Lock()
            for i in range(10):
                Process(target=f).start()

    should be rewritten as ::

        from multiprocessing import Process, Lock

        def f(l):
            ... do something using "l" ...

        if __name__ == '__main__':
            lock = Lock()
            for i in range(10):
                Process(target=f, args=(lock,)).start()

Beware of replacing :data:`sys.stdin` with a "file like object"

    :mod:`multiprocessing` originally unconditionally called::

        os.close(sys.stdin.fileno())

    in the :meth:`multiprocessing.Process._bootstrap` method --- this resulted
    in issues with processes-in-processes.  This has been changed to::

        sys.stdin.close()
        sys.stdin = open(os.devnull)

    Which solves the fundamental issue of processes colliding with each other
    resulting in a bad file descriptor error, but introduces a potential danger
    to applications which replace :data:`sys.stdin` with a "file-like object"
    with output buffering.  This danger is that if multiple processes call
    :func:`close()` on this file-like object, it could result in the same data
    being flushed to the object multiple times, resulting in corruption.

    If you write a file-like object and implement your own caching, you can
    make it fork-safe by storing the pid whenever you append to the cache, and
    discarding the cache when the pid changes.  For example::

        @property
        def cache(self):
            pid = os.getpid()
            if pid != self._pid:
                self._pid = pid
                self._cache = []
            return self._cache

    For more information, see :issue:`5155`, :issue:`5313` and :issue:`5331`.

Windows
~~~~~~~

Since Windows lacks :func:`os.fork` it has a few extra restrictions:

More picklability

    Ensure that all arguments to :meth:`Process.__init__` are picklable.  This
    means, in particular, that bound or unbound methods cannot be used directly
    as the ``target`` argument on Windows --- just define a function and use
    that instead.

    Also, if you subclass :class:`Process` then make sure that instances will
    be picklable when the :meth:`Process.start` method is called.

Global variables

    Bear in mind that if code run in a child process tries to access a global
    variable, then the value it sees (if any) may not be the same as the value
    in the parent process at the time that :meth:`Process.start` was called.

    However, global variables which are just module level constants cause no
    problems.

Safe importing of main module

    Make sure that the main module can be safely imported by a new Python
    interpreter without causing unintended side effects (such as starting a new
    process).

    For example, under Windows running the following module would fail with a
    :exc:`RuntimeError`::

        from multiprocessing import Process

        def foo():
            print('hello')

        p = Process(target=foo)
        p.start()

    Instead one should protect the "entry point" of the program by using ``if
    __name__ == '__main__':`` as follows::

        from multiprocessing import Process, freeze_support

        def foo():
            print('hello')

        if __name__ == '__main__':
            freeze_support()
            p = Process(target=foo)
            p.start()

    (The ``freeze_support()`` line can be omitted if the program will be run
    normally instead of frozen.)

    This allows the newly spawned Python interpreter to safely import the
    module and then run the module's ``foo()`` function.

    Similar restrictions apply if a pool or manager is created in the main
    module.

.. _multiprocessing-examples:

Examples
--------

Demonstration of how to create and use customized managers and proxies:

.. literalinclude:: ../includes/mp_newtype.py
   :language: python3

Using :class:`Pool`:

.. literalinclude:: ../includes/mp_pool.py
   :language: python3

Synchronization types like locks, conditions and queues:

.. literalinclude:: ../includes/mp_synchronize.py
   :language: python3

An example showing how to use queues to feed tasks to a collection of worker
processes and collect the results:

.. literalinclude:: ../includes/mp_workers.py

An example of how a pool of worker processes can each run a
:class:`~http.server.SimpleHTTPRequestHandler` instance while sharing a single
listening socket.

.. literalinclude:: ../includes/mp_webserver.py

Some simple benchmarks comparing :mod:`multiprocessing` with :mod:`threading`:

.. literalinclude:: ../includes/mp_benchmarks.py
billiard-3.5.0.3/Makefile0000644000175000017500000000616113132743245015040 0ustar omeromer00000000000000PROJ=billiard
PYTHON=python
GIT=git
TOX=tox
NOSETESTS=nosetests
ICONV=iconv
FLAKE8=flake8
FLAKEPLUS=flakeplus
SPHINX2RST=sphinx2rst

SPHINX_DIR=docs/
SPHINX_BUILDDIR="${SPHINX_DIR}/_build"
README=README.rst
README_SRC="docs/templates/readme.txt"
CONTRIBUTING=CONTRIBUTING.rst
CONTRIBUTING_SRC="docs/contributing.rst"
SPHINX_HTMLDIR="${SPHINX_BUILDDIR}/html"
DOCUMENTATION=Documentation
FLAKEPLUSTARGET=2.7

all: help

help:
	@echo "docs                 - Build documentation."
	@echo "test-all             - Run tests for all supported python versions."
	@echo "distcheck ---------- - Check distribution for problems."
@echo " test - Run unittests using current python." @echo " lint ------------ - Check codebase for problems." @echo " apicheck - Check API reference coverage." @echo " configcheck - Check configuration reference coverage." @echo " readmecheck - Check README.rst encoding." @echo " contribcheck - Check CONTRIBUTING.rst encoding" @echo " flakes -------- - Check code for syntax and style errors." @echo " flakecheck - Run flake8 on the source code." @echo " flakepluscheck - Run flakeplus on the source code." @echo "readme - Regenerate README.rst file." @echo "contrib - Regenerate CONTRIBUTING.rst file" @echo "clean-dist --------- - Clean all distribution build artifacts." @echo " clean-git-force - Remove all uncomitted files." @echo " clean ------------ - Non-destructive clean" @echo " clean-pyc - Remove .pyc/__pycache__ files" @echo " clean-docs - Remove documentation build artifacts." @echo " clean-build - Remove setup artifacts." clean: clean-docs clean-pyc clean-build clean-dist: clean clean-git-force Documentation: (cd "$(SPHINX_DIR)"; $(MAKE) html) mv "$(SPHINX_HTMLDIR)" $(DOCUMENTATION) docs: Documentation clean-docs: -rm -rf "$(SPHINX_BUILDDIR)" lint: flakecheck apicheck configcheck readmecheck apicheck: (cd "$(SPHINX_DIR)"; $(MAKE) apicheck) configcheck: (cd "$(SPHINX_DIR)"; $(MAKE) configcheck) flakecheck: $(FLAKE8) "$(PROJ)" flakediag: -$(MAKE) flakecheck flakepluscheck: $(FLAKEPLUS) --$(FLAKEPLUSTARGET) "$(PROJ)" flakeplusdiag: -$(MAKE) flakepluscheck flakes: flakediag flakeplusdiag clean-readme: -rm -f $(README) readmecheck: $(ICONV) -f ascii -t ascii $(README) >/dev/null $(README): $(SPHINX2RST) "$(README_SRC)" --ascii > $@ readme: clean-readme $(README) readmecheck clean-contrib: -rm -f "$(CONTRIBUTING)" $(CONTRIBUTING): $(SPHINX2RST) "$(CONTRIBUTING_SRC)" > $@ contrib: clean-contrib $(CONTRIBUTING) clean-pyc: -find . -type f -a \( -name "*.pyc" -o -name "*$$py.class" \) | xargs rm -find . -type d -name "__pycache__" | xargs rm -r removepyc: clean-pyc clean-build: rm -rf build/ dist/ .eggs/ *.egg-info/ .tox/ .coverage cover/ clean-git: $(GIT) clean -xdn clean-git-force: $(GIT) clean -xdf test-all: clean-pyc $(TOX) test: $(PYTHON) setup.py test cov: $(NOSETESTS) -xv --with-coverage --cover-html --cover-branch build: $(PYTHON) setup.py sdist bdist_wheel distcheck: lint test clean dist: readme contrib clean-dist build billiard-3.5.0.3/billiard.egg-info/0000755000175000017500000000000013132746522016651 5ustar omeromer00000000000000billiard-3.5.0.3/billiard.egg-info/not-zip-safe0000644000175000017500000000000113132746352021100 0ustar omeromer00000000000000 billiard-3.5.0.3/billiard.egg-info/dependency_links.txt0000644000175000017500000000000113132746522022717 0ustar omeromer00000000000000 billiard-3.5.0.3/billiard.egg-info/PKG-INFO0000644000175000017500000001001513132746522017743 0ustar omeromer00000000000000Metadata-Version: 1.1 Name: billiard Version: 3.5.0.3 Summary: Python multiprocessing fork with improvements and bugfixes Home-page: http://github.com/celery/billiard Author: Ask Solem Author-email: ask@celeryproject.org License: BSD Description: ======== billiard ======== :version: 3.5.0.2 |build-status-lin| |build-status-win| |license| |wheel| |pyversion| |pyimp| .. |build-status-lin| image:: https://secure.travis-ci.org/celery/billiard.png?branch=master :alt: Build status on Linux :target: https://travis-ci.org/celery/billiard .. 
|build-status-win| image:: https://ci.appveyor.com/api/projects/status/github/celery/billiard?png=true&branch=master :alt: Build status on Windows :target: https://ci.appveyor.com/project/ask/billiard .. |license| image:: https://img.shields.io/pypi/l/billiard.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/billiard.svg :alt: Billiard can be installed via wheel :target: http://pypi.python.org/pypi/billiard .. |pyversion| image:: https://img.shields.io/pypi/pyversions/billiard.svg :alt: Supported Python versions. :target: http://pypi.python.org/pypi/billiard .. |pyimp| image:: https://img.shields.io/pypi/implementation/billiard.svg :alt: Support Python implementations. :target: http://pypi.python.org/pypi/billiard About ----- `billiard` is a fork of the Python 2.7 `multiprocessing `_ package. The multiprocessing package itself is a renamed and updated version of R Oudkerk's `pyprocessing `_ package. This standalone variant draws its fixes/improvements from python-trunk and provides additional bug fixes and improvements. - This package would not be possible if not for the contributions of not only the current maintainers but all of the contributors to the original pyprocessing package listed `here `_ - Also it is a fork of the multiprocessing backport package by Christian Heims. - It includes the no-execv patch contributed by R. Oudkerk. - And the Pool improvements previously located in `Celery`_. - Billiard is used in and is a dependency for `Celery`_ and is maintained by the Celery team. .. _`Celery`: http://celeryproject.org Bug reporting ------------- Please report bugs related to multiprocessing at the `Python bug tracker `_. Issues related to billiard should be reported at http://github.com/celery/billiard/issues. 
Keywords: multiprocessing pool process Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python Classifier: Programming Language :: C Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: System :: Distributed Computing billiard-3.5.0.3/billiard.egg-info/top_level.txt0000644000175000017500000000001113132746522021373 0ustar omeromer00000000000000billiard billiard-3.5.0.3/billiard.egg-info/SOURCES.txt0000644000175000017500000000314013132746522020533 0ustar omeromer00000000000000CHANGES.txt INSTALL.txt LICENSE.txt MANIFEST.in Makefile README.rst setup.cfg setup.py Doc/conf.py Doc/glossary.rst Doc/index.rst Doc/includes/__init__.py Doc/includes/mp_benchmarks.py Doc/includes/mp_newtype.py Doc/includes/mp_pool.py Doc/includes/mp_synchronize.py Doc/includes/mp_webserver.py Doc/includes/mp_workers.py Doc/library/multiprocessing.rst Modules/_billiard/multiprocessing.c Modules/_billiard/multiprocessing.h Modules/_billiard/semaphore.c Modules/_billiard/win32_functions.c billiard/__init__.py billiard/_ext.py billiard/_win.py billiard/common.py billiard/compat.py billiard/connection.py billiard/context.py billiard/einfo.py billiard/exceptions.py billiard/five.py billiard/forkserver.py billiard/heap.py billiard/managers.py billiard/pool.py billiard/popen_fork.py billiard/popen_forkserver.py billiard/popen_spawn_posix.py billiard/popen_spawn_win32.py billiard/process.py billiard/queues.py billiard/reduction.py billiard/resource_sharer.py billiard/semaphore_tracker.py billiard/sharedctypes.py billiard/spawn.py billiard/synchronize.py billiard/util.py billiard.egg-info/PKG-INFO billiard.egg-info/SOURCES.txt billiard.egg-info/dependency_links.txt billiard.egg-info/not-zip-safe billiard.egg-info/top_level.txt billiard/dummy/__init__.py billiard/dummy/connection.py requirements/pkgutils.txt requirements/test-ci.txt requirements/test.txt t/__init__.py t/integration/__init__.py t/integration/setup.py t/integration/tests/__init__.py t/integration/tests/test_multiprocessing.py t/unit/__init__.py t/unit/test_common.py t/unit/test_dummy.py t/unit/test_package.py t/unit/test_pool.py t/unit/test_win32.pybilliard-3.5.0.3/INSTALL.txt0000644000175000017500000000515313132743245015247 0ustar omeromer00000000000000.. default-role:: literal ================================ Installation of multiprocessing ================================ Versions earlier than Python 2.4 are not supported. If you are using Python 2.4 then you must install the `ctypes` package (which comes automatically with Python 2.5). Users of Python 2.4 on Windows also need to install the `pywin32` package. On Unix It's highly recommended to use Python 2.5.3 (not yet released) or apply the ``fork-thread-patch-2`` patch from `Issue 1683 http://bugs.python.org/issue1683`_. 
Windows binary builds for Python 2.4 and Python 2.5 are available at
http://pypi.python.org/pypi/multiprocessing

Python 2.6 and newer versions already come with multiprocessing.  Although the
standalone variant of the multiprocessing package is kept compatible with 2.6,
you mustn't install it with Python 2.6.

Otherwise, if you have the correct C compiler setup then the source
distribution can be installed the usual way::

    python setup.py install

It should not be necessary to do any editing of `setup.py` if you are using
Windows, macOS or Linux.  On other unices it may be necessary to modify the
values of the `macros` dictionary or `libraries` list.  The section to modify
reads ::

    else:
        macros = dict(
            HAVE_SEM_OPEN=1,
            HAVE_SEM_TIMEDWAIT=1,
            HAVE_FD_TRANSFER=1
        )
        libraries = ['rt']

More details can be found in the comments in `setup.py`.

Note that if you use `HAVE_SEM_OPEN=0` then support for posix semaphores will
not be compiled in, and then many of the functions in the `processing`
namespace like `Lock()` or `Queue()` will not be available.  However, one can
still create a manager using `manager = processing.Manager()` and then do
`lock = manager.Lock()` etc.


Running tests
-------------

To run the test scripts using Python 2.5 do ::

    python -m multiprocessing.tests

and on Python 2.4 do ::

    python -c "from multiprocessing.tests import main; main()"

The sources also come with a Makefile.  To run the unit tests with the
Makefile using Python 2.5 do ::

    make test

using another version of Python do ::

    make test PYTHON=python2.4

This will run a number of test scripts using both processes and threads.


Running examples
----------------

The make target `examples` runs several example scripts.


Building docs
-------------

To build the standalone documentation you need Sphinx 0.5 and setuptools 0.6c9
or newer.  Both are available at http://pypi.python.org/.  With setuptools
installed, do ::

    sudo easy_install-2.5 "Sphinx>=0.5"
    make doc

The docs end up in ``build/sphinx/builder_name``.