janus-0.4.0/.coveragerc

[run]
branch = True
source = janus, tests
omit = site-packages

[html]
directory = coverage

janus-0.4.0/.gitignore

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

coverage
.pytest_cache

janus-0.4.0/.pyup.yml

# Label PRs with `deps-update` label
label_prs: deps-update

schedule: every week

janus-0.4.0/.style.yapf

[style]
based_on_style = pep8

janus-0.4.0/.travis.yml

language: python

python:
- "3.5"
- "3.6"

install:
- pip install -r requirements-dev.txt
- pip install mypy
- pip install codecov

script:
- python setup.py check -rms
- pytest tests --cov=janus
- mypy janus

cache:
- apt
- directories:
  - $HOME/.cache/pip

before_cache:
- rm -f $HOME/.cache/pip/log/debug.log

after_success: codecov

deploy:
  provider: pypi
  user: aio-libs-bot
  password:
    secure: HhuGwcUoaxo/9zwCzNKA4/eIeuHJa/rt92LDQO+V5k3W1w1ALNtzcz7j18YvIFnxfPoh0kON3t78jfddTBEnaoKy5oUH+O3A07V23aMAoYXjs6UIht4oqYQSqncRH+yreL74OexliuVrsnOuKmxYPyhiJdJ1OVBaCa0e43iblGFSkVLP9wQSrleWb3kgKjS/kaAJ0+UEGuDH7GPkozL3NR3IqtJYbulJ0MjORSblXyRCR5FV8ag0yVRjj5p2+8uXl4T53VNPecJ415q1wFt22RzJBI/orYmE90baZSwRUg3d6cj7QX689y9qvQ9Gc9QPiRJiQB1V9BmuLx7WQbvM/Nu470aMPFAKuLtJKdDcAc1z/+l4/d5kidMVZ+vFZftub66I6mREZeHjvJdnOaEPcHYk/Vm8a2aau3H4dt/4rYLQpqwt/LFUD2IiTimUdiJU/MNx1S4fMxobHUXRecQIQtXYdhY3j+eZKhBuiBodHIXbgnsT1myrBIEbNceMVVgfjFQOFid7dXkqWGvuT3onB51DeKixhdjR26pPX+8INYKiSS+ZgFy0K5YV5f3ShGO+QlT2kJv4IrixO7H/JfPCLI9L7P5T2D7viuPqDz6Ik5TiAMkWgKXwspEQSNi68k46WHoaQHl73EJy+XuCLBgs9oFL6iZ+ADvFvvoGu7hAmtQ=
  distributions: "sdist bdist_wheel"
  on:
    tags: true
    all_branches: true
    python: 3.6

janus-0.4.0/CHANGES.rst

Changes
=======

0.4.0 (2018-07-28)
------------------

- Add ``py.typed`` marker #89
- Drop python 3.4 support and fix minimal version python3.5.3 #88
- Add property that indicates if queue is closed #86

0.3.2 (2018-07-06)
------------------

- Fixed python 3.7 support #97

0.3.1 (2018-01-30)
------------------

- Fixed bug with join() in case tasks are added by sync_q.put() #75

0.3.0 (2017-02-21)
------------------

- Expose `unfinished_tasks` property #34

0.2.4 (2016-12-05)
------------------

- Restore tarball deploying

0.2.3 (2016-07-12)
------------------

- Fix exception type

0.2.2 (2016-07-11)
------------------

-
Update asyncio.async() to use asyncio.ensure_future() #6 0.2.1 (2016-03-24) ------------------ - Fix `python setup.py test` command #4 0.2.0 (2015-09-20) ------------------ - Support Python 3.5 0.1.5 (2015-07-24) ------------------ - Use loop.time() instead of time.monotonic() 0.1.1 (2015-06-12) ------------------ - Fix some typos in README and setup.py - Add addtional checks for loop closing - Mention DataRobot 0.1.0 (2015-06-11) ------------------ - Initial release janus-0.4.0/LICENSE000066400000000000000000000261501332704270500136470ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2015-2018 Andrew Svetlov and aio-libs team Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. janus-0.4.0/MANIFEST.in000066400000000000000000000002071332704270500143730ustar00rootroot00000000000000include LICENSE include CHANGES.rst include README.rst include Makefile graft janus graft tests global-exclude *.pyc prune docs/_build janus-0.4.0/Makefile000066400000000000000000000004671332704270500143050ustar00rootroot00000000000000develop: python setup.py develop flake: flake8 janus tests test: flake develop py.test tests vtest: flake develop py.test -v tests yapf: yapf -ri janus tests setup.py cov: flake develop py.test --cov=janus --cov=tests --cov-report=term --cov-report=html @echo "open file://`pwd`/htmlcov/index.html" janus-0.4.0/README.rst000066400000000000000000000037421332704270500143330ustar00rootroot00000000000000======= janus ======= .. image:: https://travis-ci.com/aio-libs/janus.svg?branch=master :target: https://travis-ci.com/aio-libs/janus .. image:: https://codecov.io/gh/aio-libs/janus/branch/master/graph/badge.svg :target: https://codecov.io/gh/aio-libs/janus .. image:: https://img.shields.io/pypi/v/janus.svg :target: https://pypi.python.org/pypi/janus .. image:: https://badges.gitter.im/Join%20Chat.svg :target: https://gitter.im/aio-libs/Lobby :alt: Chat on Gitter Mixed sync-async queue, supposed to be used for communicating between classic synchronous (threaded) code and asynchronous (in terms of asyncio_) one. Like `Janus god `_ the queue object from the library has two faces: synchronous and asynchronous interface. Synchronous is fully compatible with `standard queue `_, asynchronous one follows `asyncio queue design `_. Usage example ============= .. code:: python import asyncio import janus loop = asyncio.get_event_loop() queue = janus.Queue(loop=loop) def threaded(sync_q): for i in range(100): sync_q.put(i) sync_q.join() async def async_coro(async_q): for i in range(100): val = await async_q.get() assert val == i async_q.task_done() fut = loop.run_in_executor(None, threaded, queue.sync_q) loop.run_until_complete(async_coro(queue.async_q)) loop.run_until_complete(fut) Communication channels ====================== *aio-libs* google group: https://groups.google.com/forum/#!forum/aio-libs Feel free to post your questions and ideas here. *gitter chat* https://gitter.im/aio-libs/Lobby License ======= ``janus`` library is offered under Apache 2 license. Thanks ====== The library development is sponsored by DataRobot (https://datarobot.com) .. 
_asyncio: https://docs.python.org/3/library/asyncio.html janus-0.4.0/janus/000077500000000000000000000000001332704270500137565ustar00rootroot00000000000000janus-0.4.0/janus/__init__.py000066400000000000000000000445731332704270500161040ustar00rootroot00000000000000import asyncio import logging import threading from asyncio import QueueEmpty as AsyncQueueEmpty from asyncio import QueueFull as AsyncQueueFull from collections import deque from heapq import heappop, heappush from queue import Empty as SyncQueueEmpty from queue import Full as SyncQueueFull __version__ = '0.4.0' log = logging.getLogger(__package__) try: ensure_future = asyncio.ensure_future except AttributeError: ensure_future = getattr(asyncio, 'async') class Queue: def __init__(self, maxsize=0, *, loop=None): if loop is None: loop = asyncio.get_event_loop() self._loop = loop self._maxsize = maxsize self._init(maxsize) self._unfinished_tasks = 0 self._sync_mutex = threading.Lock() self._sync_not_empty = threading.Condition(self._sync_mutex) self._sync_not_full = threading.Condition(self._sync_mutex) self._all_tasks_done = threading.Condition(self._sync_mutex) self._async_mutex = asyncio.Lock(loop=loop) self._async_not_empty = asyncio.Condition(self._async_mutex, loop=loop) self._async_not_full = asyncio.Condition(self._async_mutex, loop=loop) self._finished = asyncio.Event(loop=self._loop) self._finished.set() self._closing = False self._pending = set() def checked_call_soon_threadsafe(callback, *args): try: loop.call_soon_threadsafe(callback, *args) except RuntimeError: # swallowing agreed in #2 pass self._call_soon_threadsafe = checked_call_soon_threadsafe def checked_call_soon(callback, *args): if not loop.is_closed(): loop.call_soon(callback, *args) self._call_soon = checked_call_soon self._sync_queue = _SyncQueueProxy(self) self._async_queue = _AsyncQueueProxy(self) def close(self): with self._sync_mutex: self._closing = True for fut in self._pending: fut.cancel() async def wait_closed(self): # should be called from loop after close(). # Nobody should put/get at this point, # so lock acquiring is not required if not self._closing: raise RuntimeError("Waiting for non-closed queue") if not self._pending: return await asyncio.wait(self._pending, loop=self._loop) @property def closed(self): return self._closing and not self._pending @property def maxsize(self): return self._maxsize @property def sync_q(self): return self._sync_queue @property def async_q(self): return self._async_queue # Override these methods to implement other queue organizations # (e.g. stack or priority queue). 
# These will only be called with appropriate locks held def _init(self, maxsize): self._queue = deque() def _qsize(self): return len(self._queue) # Put a new item in the queue def _put(self, item): self._queue.append(item) # Get an item from the queue def _get(self): return self._queue.popleft() def _put_internal(self, item): self._put(item) self._unfinished_tasks += 1 self._finished.clear() def _notify_sync_not_empty(self): def f(): with self._sync_mutex: self._sync_not_empty.notify() self._loop.run_in_executor(None, f) def _notify_sync_not_full(self): def f(): with self._sync_mutex: self._sync_not_full.notify() fut = self._loop.run_in_executor(None, f) fut.add_done_callback(self._pending.discard) self._pending.add(fut) def _notify_async_not_empty(self, *, threadsafe): async def f(): async with self._async_mutex: self._async_not_empty.notify() def task_maker(): task = ensure_future(f(), loop=self._loop) task.add_done_callback(self._pending.discard) self._pending.add(task) if threadsafe: self._call_soon_threadsafe(task_maker) else: self._call_soon(task_maker) def _notify_async_not_full(self, *, threadsafe): async def f(): async with self._async_mutex: self._async_not_full.notify() def task_maker(): task = ensure_future(f(), loop=self._loop) task.add_done_callback(self._pending.discard) self._pending.add(task) if threadsafe: self._call_soon_threadsafe(task_maker) else: self._call_soon(task_maker) def _check_closing(self): if self._closing: raise RuntimeError('Modification of closed queue is forbidden') class _SyncQueueProxy: '''Create a queue object with a given maximum size. If maxsize is <= 0, the queue size is infinite. ''' def __init__(self, parent): self._parent = parent @property def maxsize(self): return self._parent._maxsize @property def closed(self): return self._parent.closed def task_done(self): '''Indicate that a formerly enqueued task is complete. Used by Queue consumer threads. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises a ValueError if called more times than there were items placed in the queue. ''' self._parent._check_closing() with self._parent._all_tasks_done: unfinished = self._parent._unfinished_tasks - 1 if unfinished <= 0: if unfinished < 0: raise ValueError('task_done() called too many times') self._parent._all_tasks_done.notify_all() self._parent._loop.call_soon_threadsafe( self._parent._finished.set) self._parent._unfinished_tasks = unfinished def join(self): '''Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. ''' with self._parent._all_tasks_done: while self._parent._unfinished_tasks: self._parent._all_tasks_done.wait() def qsize(self): '''Return the approximate size of the queue (not reliable!).''' return self._parent._qsize() @property def unfinished_tasks(self): '''Return the number of unfinished tasks.''' return self._parent._unfinished_tasks def empty(self): '''Return True if the queue is empty, False otherwise (not reliable!). This method is likely to be removed at some point. 
Use qsize() == 0 as a direct substitute, but be aware that either approach risks a race condition where a queue can grow before the result of empty() or qsize() can be used. To create code that needs to wait for all queued tasks to be completed, the preferred technique is to use the join() method. ''' return not self._parent._qsize() def full(self): '''Return True if the queue is full, False otherwise (not reliable!). This method is likely to be removed at some point. Use qsize() >= n as a direct substitute, but be aware that either approach risks a race condition where a queue can shrink before the result of full() or qsize() can be used. ''' return 0 < self._parent._maxsize <= self._parent._qsize() def put(self, item, block=True, timeout=None): '''Put an item into the queue. If optional args 'block' is true and 'timeout' is None (the default), block if necessary until a free slot is available. If 'timeout' is a non-negative number, it blocks at most 'timeout' seconds and raises the Full exception if no free slot was available within that time. Otherwise ('block' is false), put an item on the queue if a free slot is immediately available, else raise the Full exception ('timeout' is ignored in that case). ''' self._parent._check_closing() with self._parent._sync_not_full: if self._parent._maxsize > 0: if not block: if self._parent._qsize() >= self._parent._maxsize: raise SyncQueueFull elif timeout is None: while self._parent._qsize() >= self._parent._maxsize: self._parent._sync_not_full.wait() elif timeout < 0: raise ValueError("'timeout' must be a non-negative number") else: time = self._parent._loop.time endtime = time() + timeout while self._parent._qsize() >= self._parent._maxsize: remaining = endtime - time() if remaining <= 0.0: raise SyncQueueFull self._parent._sync_not_full.wait(remaining) self._parent._put_internal(item) self._parent._sync_not_empty.notify() self._parent._notify_async_not_empty(threadsafe=True) def get(self, block=True, timeout=None): '''Remove and return an item from the queue. If optional args 'block' is true and 'timeout' is None (the default), block if necessary until an item is available. If 'timeout' is a non-negative number, it blocks at most 'timeout' seconds and raises the Empty exception if no item was available within that time. Otherwise ('block' is false), return an item if one is immediately available, else raise the Empty exception ('timeout' is ignored in that case). ''' self._parent._check_closing() with self._parent._sync_not_empty: if not block: if not self._parent._qsize(): raise SyncQueueEmpty elif timeout is None: while not self._parent._qsize(): self._parent._sync_not_empty.wait() elif timeout < 0: raise ValueError("'timeout' must be a non-negative number") else: time = self._parent._loop.time endtime = time() + timeout while not self._parent._qsize(): remaining = endtime - time() if remaining <= 0.0: raise SyncQueueEmpty self._parent._sync_not_empty.wait(remaining) item = self._parent._get() self._parent._sync_not_full.notify() self._parent._notify_async_not_full(threadsafe=True) return item def put_nowait(self, item): '''Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the Full exception. ''' return self.put(item, block=False) def get_nowait(self): '''Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the Empty exception. 
''' return self.get(block=False) class _AsyncQueueProxy: '''Create a queue object with a given maximum size. If maxsize is <= 0, the queue size is infinite. ''' def __init__(self, parent): self._parent = parent @property def closed(self): return self._parent.closed def qsize(self): """Number of items in the queue.""" return self._parent._qsize() @property def unfinished_tasks(self): '''Return the number of unfinished tasks.''' return self._parent._unfinished_tasks @property def maxsize(self): """Number of items allowed in the queue.""" return self._parent._maxsize def empty(self): """Return True if the queue is empty, False otherwise.""" return self.qsize() == 0 def full(self): """Return True if there are maxsize items in the queue. Note: if the Queue was initialized with maxsize=0 (the default), then full() is never True. """ if self._parent._maxsize <= 0: return False else: return self.qsize() >= self._parent._maxsize async def put(self, item): """Put an item into the queue. Put an item into the queue. If the queue is full, wait until a free slot is available before adding item. This method is a coroutine. """ self._parent._check_closing() async with self._parent._async_not_full: self._parent._sync_mutex.acquire() locked = True try: if self._parent._maxsize > 0: do_wait = True while do_wait: do_wait = ( self._parent._qsize() >= self._parent._maxsize ) if do_wait: locked = False self._parent._sync_mutex.release() await self._parent._async_not_full.wait() self._parent._sync_mutex.acquire() locked = True self._parent._put_internal(item) self._parent._async_not_empty.notify() self._parent._notify_sync_not_empty() finally: if locked: self._parent._sync_mutex.release() def put_nowait(self, item): """Put an item into the queue without blocking. If no free slot is immediately available, raise QueueFull. """ self._parent._check_closing() with self._parent._sync_mutex: if self._parent._maxsize > 0: if self._parent._qsize() >= self._parent._maxsize: raise AsyncQueueFull self._parent._put_internal(item) self._parent._notify_async_not_empty(threadsafe=False) self._parent._notify_sync_not_empty() async def get(self): """Remove and return an item from the queue. If queue is empty, wait until an item is available. This method is a coroutine. """ self._parent._check_closing() async with self._parent._async_not_empty: self._parent._sync_mutex.acquire() locked = True try: do_wait = True while do_wait: do_wait = self._parent._qsize() == 0 if do_wait: locked = False self._parent._sync_mutex.release() await self._parent._async_not_empty.wait() self._parent._sync_mutex.acquire() locked = True item = self._parent._get() self._parent._async_not_full.notify() self._parent._notify_sync_not_full() return item finally: if locked: self._parent._sync_mutex.release() def get_nowait(self): """Remove and return an item from the queue. Return an item if one is immediately available, else raise QueueEmpty. """ self._parent._check_closing() with self._parent._sync_mutex: if self._parent._qsize() == 0: raise AsyncQueueEmpty item = self._parent._get() self._parent._notify_async_not_full(threadsafe=False) self._parent._notify_sync_not_full() return item def task_done(self): """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. 
If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises ValueError if called more times than there were items placed in the queue. """ self._parent._check_closing() with self._parent._all_tasks_done: if self._parent._unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self._parent._unfinished_tasks -= 1 if self._parent._unfinished_tasks == 0: self._parent._finished.set() self._parent._all_tasks_done.notify_all() async def join(self): """Block until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer calls task_done() to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. """ while True: with self._parent._sync_mutex: if self._parent._unfinished_tasks == 0: break await self._parent._finished.wait() class PriorityQueue(Queue): '''Variant of Queue that retrieves open entries in priority order (lowest first). Entries are typically tuples of the form: (priority number, data). ''' def _init(self, maxsize): self._queue = [] def _qsize(self): return len(self._queue) def _put(self, item): heappush(self._queue, item) def _get(self): return heappop(self._queue) class LifoQueue(Queue): '''Variant of Queue that retrieves most recently added entries first.''' def _init(self, maxsize): self._queue = deque() def _qsize(self): return len(self._queue) def _put(self, item): self._queue.append(item) def _get(self): return self._queue.pop() janus-0.4.0/janus/__init__.pyi000066400000000000000000000030001332704270500162310ustar00rootroot00000000000000from typing import Generic, TypeVar, Optional, Generator, Any import asyncio _T = TypeVar('_T') class Queue(Generic[_T]): def __init__(self, maxsize: int=0, *, loop: Optional[asyncio.AbstractEventLoop]=None) -> None: ... def close(self) -> None: ... async def wait_closed(self) -> None: ... @property def maxsize(self) -> int: ... @property def sync_q(self) -> _SyncQueueProxy[_T]: ... @property def async_q(self) -> _AsyncQueueProxy[_T]: ... class _SyncQueueProxy(Generic[_T]): @property def maxsize(self) -> int: ... def task_done(self) -> None: ... def join(self) -> None: ... def qsize(self) -> int: ... def empty(self) -> bool: ... def full(self) -> bool: ... def put(self, item: _T, block: bool=True, timeout: Optional[float]=None) -> None: ... def get(self, block: bool=True, timeout: Optional[float]=None) -> _T: ... def put_nowait(self, item: _T) -> None: ... def get_nowait(self) -> _T: ... class _AsyncQueueProxy(Generic[_T]): @property def maxsize(self) -> int: ... def task_done(self) -> None: ... async def join(self) -> None: ... def qsize(self) -> int: ... def empty(self) -> bool: ... def full(self) -> bool: ... async def put(self, item: _T) -> None: ... async def get(self) -> _T: ... def put_nowait(self, item: _T) -> None: ... def get_nowait(self) -> _T: ... class PriorityQueue(Queue[_T], Generic[_T]): ... class LifoQueue(Queue[_T], Generic[_T]): ... janus-0.4.0/janus/py.typed000066400000000000000000000000151332704270500154510ustar00rootroot00000000000000# Placeholderjanus-0.4.0/requirements-dev.txt000066400000000000000000000002261332704270500166760ustar00rootroot00000000000000-e . 
ipython==6.4.0 ipdb==0.11 coverage==4.5.1 flake8==3.5.0 pytest==3.6.3 pytest-cov==2.5.1 mypy-lang==0.5.0 tox==3.1.2 wheel==0.31.1 docutils==0.14 janus-0.4.0/setup.py000066400000000000000000000045361332704270500143600ustar00rootroot00000000000000import codecs from setuptools import setup, find_packages import os import re import sys from setuptools.command.test import test as TestCommand PY_33 = sys.version_info < (3, 4) PY_35 = sys.version_info >= (3, 5) class PyTest(TestCommand): user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) with codecs.open(os.path.join(os.path.abspath(os.path.dirname( __file__)), 'janus', '__init__.py'), 'r', 'latin1') as fp: try: version = re.findall(r"^__version__ = '([^']+)'$", fp.read(), re.M)[0] except IndexError: raise RuntimeError('Unable to determine version.') def read(f): return open(os.path.join(os.path.dirname(__file__), f)).read().strip() install_requires = [] if PY_33: install_requires.append('asyncio') # if not PY_35: # install_requires.append('typing') tests_require = install_requires + ['pytest'] extras_require = {} setup( name='janus', version=version, description=("Mixed sync-async queue to interoperate between " "asyncio tasks and classic threads"), long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))), classifiers=[ 'License :: OSI Approved :: Apache Software License', 'Intended Audience :: Developers', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: Software Development :: Libraries', 'Framework :: AsyncIO', ], author='Andrew Svetlov', author_email='andrew.svetlov@gmail.com', url='https://github.com/aio-libs/janus/', license='Apache 2', packages=find_packages(), python_requires='>=3.5.3', install_requires=install_requires, tests_require=tests_require, cmdclass={'test': PyTest}, include_package_data=True, extras_require=extras_require) janus-0.4.0/tests/000077500000000000000000000000001332704270500140005ustar00rootroot00000000000000janus-0.4.0/tests/test_async.py000066400000000000000000000410561332704270500165340ustar00rootroot00000000000000"""Tests for queues.py""" import asyncio import concurrent.futures import unittest import janus class _QueueTestBase(unittest.TestCase): def setUp(self): self.loop = asyncio.new_event_loop() self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5) self.loop.set_default_executor(self.executor) asyncio.set_event_loop(None) def tearDown(self): self.executor.shutdown() self.loop.close() class QueueBasicTests(_QueueTestBase): def _test_repr_or_str(self, fn, expect_id): """Test Queue's repr or str. fn is repr or str. expect_id is True if we expect the Queue's id to appear in fn(Queue()). """ _q = janus.Queue(loop=self.loop) q = _q.async_q self.assertTrue(fn(q).startswith(' 0 and q.qsize() == q._parent._maxsize # A thread to run a function that unclogs a blocked Queue. 
class _TriggerThread(threading.Thread): def __init__(self, fn, args): self.fn = fn self.args = args self.startedEvent = threading.Event() threading.Thread.__init__(self) def run(self): # The sleep isn't necessary, but is intended to give the blocking # function in the main thread a chance at actually blocking before # we unclog it. But if the sleep is longer than the timeout-based # tests wait in their blocking functions, those tests will fail. # So we give them much longer timeout values compared to the # sleep here (I aimed at 10 seconds for blocking functions -- # they should never actually wait that long - they should make # progress as soon as we call self.fn()). time.sleep(0.1) self.startedEvent.set() self.fn(*self.args) # Execute a function that blocks, and in a separate thread, a function that # triggers the release. Returns the result of the blocking function. Caution: # block_func must guarantee to block until trigger_func is called, and # trigger_func must guarantee to change queue state so that block_func can make # enough progress to return. In particular, a block_func that just raises an # exception regardless of whether trigger_func is called will lead to # timing-dependent sporadic failures, and one of those went rarely seen but # undiagnosed for years. Now block_func must be unexceptional. If block_func # is supposed to raise an exception, call do_exceptional_blocking_test() # instead. class BlockingTestMixin: def setUp(self): asyncio.set_event_loop(None) self.loop = asyncio.new_event_loop() self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5) self.loop.set_default_executor(self.executor) def tearDown(self): self.t = None self.executor.shutdown() self.loop.close() def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args): self.t = _TriggerThread(trigger_func, trigger_args) self.t.start() self.result = block_func(*block_args) # If block_func returned before our thread made the call, we failed! if not self.t.startedEvent.is_set(): self.fail("blocking function '%r' appeared not to block" % block_func) self.t.join(10) # make sure the thread terminates if self.t.is_alive(): self.fail("trigger function '%r' appeared to not return" % trigger_func) return self.result # Call this instead if block_func is supposed to raise an exception. 
def do_exceptional_blocking_test(self, block_func, block_args, trigger_func, trigger_args, expected_exception_class): self.t = _TriggerThread(trigger_func, trigger_args) self.t.start() try: try: block_func(*block_args) except expected_exception_class: raise else: self.fail("expected exception of kind %r" % expected_exception_class) finally: self.t.join(10) # make sure the thread terminates if self.t.is_alive(): self.fail("trigger function '%r' appeared to not return" % trigger_func) if not self.t.startedEvent.is_set(): self.fail("trigger thread ended but event never set") class BaseQueueTestMixin(BlockingTestMixin): def setUp(self): self.cum = 0 self.cumlock = threading.Lock() super().setUp() def simple_queue_test(self, _q): q = _q.sync_q if q.qsize(): raise RuntimeError("Call this function with an empty queue") self.assertTrue(q.empty()) self.assertFalse(q.full()) # I guess we better check things actually queue correctly a little :) q.put(111) q.put(333) q.put(222) target_order = dict(Queue=[111, 333, 222], LifoQueue=[222, 333, 111], PriorityQueue=[111, 222, 333]) actual_order = [q.get(), q.get(), q.get()] self.assertEqual(actual_order, target_order[_q.__class__.__name__], "Didn't seem to queue the correct data!") for i in range(QUEUE_SIZE - 1): q.put(i) self.assertTrue(q.qsize(), "Queue should not be empty") self.assertTrue(not qfull(q), "Queue should not be full") last = 2 * QUEUE_SIZE full = 3 * 2 * QUEUE_SIZE q.put(last) self.assertTrue(qfull(q), "Queue should be full") self.assertFalse(q.empty()) self.assertTrue(q.full()) try: q.put(full, block=0) self.fail("Didn't appear to block with a full queue") except queue.Full: pass try: q.put(full, timeout=0.01) self.fail("Didn't appear to time-out with a full queue") except queue.Full: pass # Test a blocking put self.do_blocking_test(q.put, (full, ), q.get, ()) self.do_blocking_test(q.put, (full, True, 10), q.get, ()) # Empty it for i in range(QUEUE_SIZE): q.get() self.assertTrue(not q.qsize(), "Queue should be empty") try: q.get(block=0) self.fail("Didn't appear to block with an empty queue") except queue.Empty: pass try: q.get(timeout=0.01) self.fail("Didn't appear to time-out with an empty queue") except queue.Empty: pass # Test a blocking get self.do_blocking_test(q.get, (), q.put, ('empty', )) self.do_blocking_test(q.get, (True, 10), q.put, ('empty', )) def worker(self, q): while True: x = q.get() if x < 0: q.task_done() return with self.cumlock: self.cum += x q.task_done() def queue_join_test(self, q): self.cum = 0 for i in (0, 1): threading.Thread(target=self.worker, args=(q, )).start() for i in range(100): q.put(i) q.join() self.assertEqual(self.cum, sum(range(100)), "q.join() did not block until all tasks were done") for i in (0, 1): q.put(-1) # instruct the threads to close q.join() # verify that you can join twice def test_queue_task_done(self): # Test to make sure a queue task completed successfully. q = self.type2test(loop=self.loop).sync_q try: q.task_done() except ValueError: pass else: self.fail("Did not detect task count going negative") def test_queue_join(self): # Test that a queue join()s successfully, and before anything else # (done twice for insurance). q = self.type2test(loop=self.loop).sync_q self.queue_join_test(q) self.queue_join_test(q) try: q.task_done() except ValueError: pass else: self.fail("Did not detect task count going negative") def test_simple_queue(self): # Do it a couple of times on the same queue. # Done twice to make sure works with same instance reused. 
q = self.type2test(QUEUE_SIZE, loop=self.loop) self.simple_queue_test(q) self.simple_queue_test(q) def test_negative_timeout_raises_exception(self): q = self.type2test(QUEUE_SIZE, loop=self.loop).sync_q with self.assertRaises(ValueError): q.put(1, timeout=-1) with self.assertRaises(ValueError): q.get(1, timeout=-1) def test_nowait(self): q = self.type2test(QUEUE_SIZE, loop=self.loop).sync_q for i in range(QUEUE_SIZE): q.put_nowait(1) with self.assertRaises(queue.Full): q.put_nowait(1) for i in range(QUEUE_SIZE): q.get_nowait() with self.assertRaises(queue.Empty): q.get_nowait() def test_shrinking_queue(self): # issue 10110 q = self.type2test(3, loop=self.loop).sync_q q.put(1) q.put(2) q.put(3) with self.assertRaises(queue.Full): q.put_nowait(4) self.assertEqual(q.qsize(), 3) q._maxsize = 2 # shrink the queue with self.assertRaises(queue.Full): q.put_nowait(4) def test_maxsize(self): # Test to make sure a queue task completed successfully. q = self.type2test(maxsize=5, loop=self.loop).sync_q self.assertEqual(q.maxsize, 5) class QueueTest(BaseQueueTestMixin, unittest.TestCase): type2test = janus.Queue class LifoQueueTest(BaseQueueTestMixin, unittest.TestCase): type2test = janus.LifoQueue class PriorityQueueTest(BaseQueueTestMixin, unittest.TestCase): type2test = janus.PriorityQueue # A Queue subclass that can provoke failure at a moment's notice :) class FailingQueueException(Exception): pass class FailingQueue(janus.Queue): def __init__(self, *args, **kwargs): self.fail_next_put = False self.fail_next_get = False super().__init__(*args, **kwargs) def _put(self, item): if self.fail_next_put: self.fail_next_put = False raise FailingQueueException("You Lose") return super()._put(item) def _get(self): if self.fail_next_get: self.fail_next_get = False raise FailingQueueException("You Lose") return super()._get() class FailingQueueTest(BlockingTestMixin, unittest.TestCase): def failing_queue_test(self, _q): q = _q.sync_q if q.qsize(): raise RuntimeError("Call this function with an empty queue") for i in range(QUEUE_SIZE - 1): q.put(i) # Test a failing non-blocking put. _q.fail_next_put = True try: q.put("oops", block=0) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass _q.fail_next_put = True try: q.put("oops", timeout=0.1) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass q.put("last") self.assertTrue(qfull(q), "Queue should be full") # Test a failing blocking put _q.fail_next_put = True try: self.do_blocking_test(q.put, ("full", ), q.get, ()) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass # Check the Queue isn't damaged. # put failed, but get succeeded - re-add q.put("last") # Test a failing timeout put _q.fail_next_put = True try: self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (), FailingQueueException) self.fail("The queue didn't fail when it should have") except FailingQueueException: pass # Check the Queue isn't damaged. 
        # put failed, but get succeeded - re-add
        q.put("last")
        self.assertTrue(qfull(q), "Queue should be full")
        q.get()
        self.assertTrue(not qfull(q), "Queue should not be full")
        q.put("last")
        self.assertTrue(qfull(q), "Queue should be full")
        # Test a blocking put
        self.do_blocking_test(q.put, ("full", ), q.get, ())
        # Empty it
        for i in range(QUEUE_SIZE):
            q.get()
        self.assertTrue(not q.qsize(), "Queue should be empty")
        q.put("first")
        _q.fail_next_get = True
        try:
            q.get()
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        self.assertTrue(q.qsize(), "Queue should not be empty")
        _q.fail_next_get = True
        try:
            q.get(timeout=0.1)
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        self.assertTrue(q.qsize(), "Queue should not be empty")
        q.get()
        self.assertTrue(not q.qsize(), "Queue should be empty")
        _q.fail_next_get = True
        try:
            self.do_exceptional_blocking_test(q.get, (), q.put, ('empty', ),
                                              FailingQueueException)
            self.fail("The queue didn't fail when it should have")
        except FailingQueueException:
            pass
        # put succeeded, but get failed.
        self.assertTrue(q.qsize(), "Queue should not be empty")
        q.get()
        self.assertTrue(not q.qsize(), "Queue should be empty")

    def test_failing_queue(self):
        # Test to make sure a queue is functioning correctly.
        # Done twice to the same instance.
        q = FailingQueue(QUEUE_SIZE, loop=self.loop)
        self.failing_queue_test(q)
        self.failing_queue_test(q)

    def test_closed_loop_non_failing(self):
        q = janus.Queue(QUEUE_SIZE, loop=self.loop).sync_q
        # we are patching the loop to follow the setUp/tearDown agreement
        with patch.object(self.loop, 'call_soon_threadsafe') as func:
            func.side_effect = RuntimeError()
            q.put_nowait(1)
            self.assertEqual(func.call_count, 1)


if __name__ == "__main__":
    unittest.main()

janus-0.4.0/tox.ini

[tox]
envlist = check, {py34,py35}-{debug,release}, report

[testenv]
deps =
    coverage
    pytest
commands =
    coverage run -m pytest {posargs}
    mv .coverage .coverage.{envname}
setenv =
    debug: PYTHONASYNCIODEBUG = 1
basepython:
    py33: python3.3
    py34: python3.4
    py35: python3.5
whitelist_externals =
    coverage
    mv
    echo

[testenv:check]
deps =
    wheel
    flake8
    coverage
    docutils
commands =
    flake8 janus tests
    coverage erase
basepython: python3

[testenv:report]
commands =
    coverage combine
    coverage report
    coverage html
    echo "open file://{toxinidir}/coverage/index.html"
basepython: python3
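The 0.4.0 sources above add ``close()``, ``wait_closed()`` and the ``closed``
property on top of the two queue faces shown in the README example. The
following is a minimal sketch, not part of the packaged sources, of how those
pieces fit together; the ``producer``/``consumer`` names and the item count
are illustrative only.

.. code:: python

    import asyncio

    import janus


    def producer(sync_q):
        # Worker thread: uses the blocking, queue.Queue-compatible face.
        for i in range(10):
            sync_q.put(i)
        sync_q.join()


    async def consumer(async_q):
        # Event loop side: uses the asyncio.Queue-compatible face.
        for i in range(10):
            assert await async_q.get() == i
            async_q.task_done()


    loop = asyncio.get_event_loop()
    queue = janus.Queue(loop=loop)

    fut = loop.run_in_executor(None, producer, queue.sync_q)
    loop.run_until_complete(consumer(queue.async_q))
    loop.run_until_complete(fut)

    # New in 0.4.0: shut the queue down explicitly.  Further put()/get()
    # calls raise RuntimeError, and `closed` becomes True once the pending
    # internal callbacks have been awaited by wait_closed().
    queue.close()
    loop.run_until_complete(queue.wait_closed())
    assert queue.closed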