===== async-lru-2.0.5/.github/dependabot.yml =====

version: 2
updates:
- package-ecosystem: pip
  directory: "/"
  schedule:
    interval: daily
- package-ecosystem: "github-actions"
  directory: "/"
  schedule:
    interval: "monthly"

===== async-lru-2.0.5/.github/workflows/auto-merge.yaml =====

name: Dependabot auto-merge
on: pull_request_target

permissions:
  pull-requests: write
  contents: write

jobs:
  dependabot:
    runs-on: ubuntu-latest
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - name: Dependabot metadata
        id: metadata
        uses: dependabot/fetch-metadata@v2.3.0
        with:
          github-token: "${{ secrets.GITHUB_TOKEN }}"
      - name: Enable auto-merge for Dependabot PRs
        run: gh pr merge --auto --squash "$PR_URL"
        env:
          PR_URL: ${{github.event.pull_request.html_url}}
          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}

===== async-lru-2.0.5/.github/workflows/ci-cd.yml =====

name: CI

on:
  push:
    branches:
      - master
      - '[0-9].[0-9]+'  # matches to backport branches, e.g. 3.6
    tags: [ 'v*' ]
  pull_request:
    branches:
      - master
      - '[0-9].[0-9]+'

jobs:
  lint:
    name: Linter
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Setup Python
      uses: actions/setup-python@v5
      with:
        python-version: 3.9
        cache: 'pip'
        cache-dependency-path: '**/requirements*.txt'
    - name: Pre-Commit hooks
      uses: pre-commit/action@v3.0.1
    - name: Install dependencies
      uses: py-actions/py-dependency-install@v4.1.0
      with:
        path: requirements-dev.txt
    - name: Install itself
      run: |
        pip install .
    - name: Run linter
      run: |
        make lint
    - name: Prepare twine checker
      run: |
        pip install -U twine wheel build
        python -m build
    - name: Run twine checker
      run: |
        twine check dist/*

  test:
    name: Test
    strategy:
      matrix:
        pyver: ['3.9', '3.10', '3.11', '3.12', '3.13']
        os: [ubuntu, macos, windows]
        experimental: [false]
        include:
          - pyver: pypy-3.9
            os: ubuntu
            experimental: false
          - os: ubuntu
            pyver: "3.14"
            experimental: true
      fail-fast: true
    runs-on: ${{ matrix.os }}-latest
    timeout-minutes: 15
    continue-on-error: ${{ matrix.experimental }}
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Setup Python ${{ matrix.pyver }}
      uses: actions/setup-python@v5
      with:
        allow-prereleases: true
        python-version: ${{ matrix.pyver }}
        cache: 'pip'
        cache-dependency-path: '**/requirements*.txt'
    - name: Install dependencies
      uses: py-actions/py-dependency-install@v4.1.0
      with:
        path: requirements.txt
    - name: Run unittests
      run: make test
      env:
        COLOR: 'yes'
    - run: python -m coverage xml
    - name: Upload coverage
      uses: codecov/codecov-action@v5
      with:
        file: ./coverage.xml
        flags: unit

  check:  # This job does nothing and is only used for the branch protection
    if: always()
    needs: [lint, test]
    runs-on: ubuntu-latest
    steps:
    - name: Decide whether the needed jobs succeeded or failed
      uses: re-actors/alls-green@release/v1
      with:
        jobs: ${{ toJSON(needs) }}

  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    needs: [check]
    if: github.event_name == 'push' && contains(github.ref, 'refs/tags/')
    permissions:
      contents: write  # IMPORTANT: mandatory for making GitHub Releases
      id-token: write  # IMPORTANT: mandatory for trusted publishing & sigstore
    environment:
      name: pypi
      url: https://pypi.org/p/async-lru
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Setup Python
      uses: actions/setup-python@v5
      with:
        python-version: 3.13
    - name: Install dependencies
      run: python -m pip install -U pip wheel setuptools build twine
    - name: Build dists
      run: |
        python -m build
    - name: Make Release
      uses: aio-libs/create-release@v1.6.6
      with:
        changes_file: CHANGES.rst
        name: async-lru
        version_file: async_lru/__init__.py
        github_token: ${{ secrets.GITHUB_TOKEN }}
        dist_dir: dist
        fix_issue_regex: "`#(\\d+) `"
        fix_issue_repl: "(#\\1)"
    - name: >-
        Publish 🐍đŸ“Ļ to PyPI
      uses: pypa/gh-action-pypi-publish@release/v1
    - name: Sign the dists with Sigstore
      uses: sigstore/gh-action-sigstore-python@v3.0.0
      with:
        inputs: >-
          ./dist/*.tar.gz
          ./dist/*.whl
    - name: Upload artifact signatures to GitHub Release
      # Confusingly, this action also supports updating releases, not
      # just creating them. This is what we want here, since we've manually
      # created the release above.
      uses: softprops/action-gh-release@v2
      with:
        # dist/ contains the built packages, while smoketest-artifacts/
        # contains the signatures and certificates.
        files: dist/**

===== async-lru-2.0.5/.github/workflows/codeql.yml =====

name: "CodeQL"

on:
  push:
    branches: [ 'master' ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ 'master' ]
  schedule:
    - cron: '5 1 * * 4'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support

    steps:
    - name: Checkout repository
      uses: actions/checkout@v4

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v3
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.

        # For details on CodeQL's query packs refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
        # queries: security-extended,security-and-quality

    # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v3

    # â„šī¸ Command-line programs to run using the OS shell.
    # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun

    # If the Autobuild fails above, remove it and uncomment the following three lines.
    # Modify them (or add more) to build your code if your project requires it; please refer to the EXAMPLE below for guidance.
    # - run: |
    #     echo "Run, Build Application using script"
    #     ./location_of_script_within_repo/buildscript.sh

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v3
      with:
        category: "/language:${{matrix.language}}"

===== async-lru-2.0.5/.gitignore =====

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
pyvenv/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
.eggs

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
cover

# Translations
*.mo
*.pot

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# PyCharm
.idea
*.iml

# rope
.ropeproject

.python-version

===== async-lru-2.0.5/.pre-commit-config.yaml =====

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: 'v4.4.0'
  hooks:
  - id: check-merge-conflict
- repo: https://github.com/asottile/yesqa
  rev: v1.4.0
  hooks:
  - id: yesqa
- repo: https://github.com/PyCQA/isort
  rev: '5.12.0'
  hooks:
  - id: isort
- repo: https://github.com/psf/black
  rev: '23.1.0'
  hooks:
  - id: black
    language_version: python3  # Should be a command that runs python
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: 'v4.4.0'
  hooks:
  - id: end-of-file-fixer
    exclude: >-
      ^docs/[^/]*\.svg$
  - id: requirements-txt-fixer
  - id: trailing-whitespace
  - id: file-contents-sorter
    files: |
      CONTRIBUTORS.txt|
      docs/spelling_wordlist.txt|
      .gitignore|
      .gitattributes
  - id: check-case-conflict
  - id: check-json
  - id: check-xml
  - id: check-executables-have-shebangs
  - id: check-toml
  - id: check-xml
  - id: check-yaml
  - id: debug-statements
  - id: check-added-large-files
  - id: check-symlinks
  - id: debug-statements
  - id: detect-aws-credentials
    args: ['--allow-missing-credentials']
  - id: detect-private-key
    exclude: ^examples/
- repo: https://github.com/PyCQA/flake8
  rev: '6.0.0'
  hooks:
  - id: flake8
    exclude: "^docs/"
- repo: https://github.com/asottile/pyupgrade
  rev: 'v3.3.1'
  hooks:
  - id: pyupgrade
    args: ['--py36-plus']
- repo: https://github.com/Lucas-C/pre-commit-hooks-markup
  rev: v1.0.1
  hooks:
  - id: rst-linter
    files: >-
      ^[^/]+[.]rst$

===== async-lru-2.0.5/CHANGES.rst =====

=======
CHANGES
=======

.. towncrier release notes start

2.0.5 (2025-03-16)
==================

- Fixed a memory leak on exceptions and made a minor performance improvement.

2.0.4 (2023-07-27)
==================

- Fixed an error when there are pending tasks while calling ``.cache_clear()``.

2.0.3 (2023-07-07)
==================

- Fixed a ``KeyError`` that could occur when using ``ttl`` with ``maxsize``.
- Dropped the ``typing-extensions`` dependency on Python 3.11+.

===== async-lru-2.0.5/LICENSE =====

The MIT License

Copyright (c) 2018 aio-libs team https://github.com/aio-libs/
Copyright (c) 2017 Ocean S. A. https://ocean.io/
Copyright (c) 2016-2017 WikiBusiness Corporation http://wikibusiness.org/

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.

===== async-lru-2.0.5/MANIFEST.in =====

include README.rst
include LICENSE
include Makefile
graft async_lru
graft tests
recursive-exclude * __pycache__
recursive-exclude * *.py[co]

===== async-lru-2.0.5/Makefile =====

# Some simple testing tasks (sorry, UNIX only).

.PHONY: init setup
init setup:
	pip install -r requirements-dev.txt
	pre-commit install

.PHONY: fmt
fmt:
	python -m pre_commit run --all-files --show-diff-on-failure

.PHONY: lint
lint: fmt
	mypy

.PHONY: test
test:
	pytest -s ./tests/

===== async-lru-2.0.5/README.rst =====

async-lru
=========

:info: Simple lru cache for asyncio

.. image:: https://github.com/aio-libs/async-lru/actions/workflows/ci-cd.yml/badge.svg?event=push
   :target: https://github.com/aio-libs/async-lru/actions/workflows/ci-cd.yml?query=event:push
   :alt: GitHub Actions CI/CD workflows status

.. image:: https://img.shields.io/pypi/v/async-lru.svg?logo=Python&logoColor=white
   :target: https://pypi.org/project/async-lru
   :alt: async-lru @ PyPI

.. image:: https://codecov.io/gh/aio-libs/async-lru/branch/master/graph/badge.svg
   :target: https://codecov.io/gh/aio-libs/async-lru

.. image:: https://img.shields.io/matrix/aio-libs:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat
   :target: https://matrix.to/#/%23aio-libs:matrix.org
   :alt: Matrix Room — #aio-libs:matrix.org

.. image:: https://img.shields.io/matrix/aio-libs-space:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs-space%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat
   :target: https://matrix.to/#/%23aio-libs-space:matrix.org
   :alt: Matrix Space — #aio-libs-space:matrix.org

Installation
------------

.. code-block:: shell

   pip install async-lru

Usage
-----

This package is a port of Python's built-in `functools.lru_cache <https://docs.python.org/3/library/functools.html#functools.lru_cache>`_
function for `asyncio <https://docs.python.org/3/library/asyncio.html>`_.

To better handle async behaviour, it also ensures that multiple concurrent calls
result in only one call to the wrapped function, with every ``await`` receiving
the result of that call when it completes.

.. code-block:: python

    import asyncio

    import aiohttp
    from async_lru import alru_cache


    @alru_cache(maxsize=32)
    async def get_pep(num):
        resource = 'http://www.python.org/dev/peps/pep-%04d/' % num
        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(resource) as s:
                    return await s.read()
            except aiohttp.ClientError:
                return 'Not Found'


    async def main():
        for n in 8, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991:
            pep = await get_pep(n)
            print(n, len(pep))

        print(get_pep.cache_info())
        # CacheInfo(hits=3, misses=8, maxsize=32, currsize=8)

        # closing is optional, but highly recommended
        await get_pep.cache_close()


    asyncio.run(main())
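
Because concurrent calls with the same arguments are coalesced, the wrapped
coroutine body runs only once no matter how many callers are waiting. The
sketch below illustrates this; the ``fetch_data`` coroutine and its delay are
illustrative only and are not part of the library:

.. code-block:: python

    import asyncio

    from async_lru import alru_cache

    calls = 0


    @alru_cache(maxsize=32)
    async def fetch_data(key):
        global calls
        calls += 1
        await asyncio.sleep(1)
        return key.upper()


    async def main():
        # Ten concurrent awaits of the same key share a single execution.
        results = await asyncio.gather(*(fetch_data("spam") for _ in range(10)))
        assert results == ["SPAM"] * 10
        assert calls == 1


    asyncio.run(main())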
TTL (time-to-live in seconds, expiration on timeout) is supported by passing the
`ttl` configuration parameter (off by default):

.. code-block:: python

    @alru_cache(ttl=5)
    async def func(arg):
        return arg * 2

The library supports explicit invalidation of a specific call via `cache_invalidate()`:

.. code-block:: python

    @alru_cache(ttl=5)
    async def func(arg1, arg2):
        return arg1 + arg2

    func.cache_invalidate(1, arg2=2)

The method returns `True` if the corresponding arguments were already cached,
`False` otherwise.

Thanks
------

The library was donated by `Ocean S.A. <https://ocean.io/>`_

Thanks to the company for the contribution.

===== async-lru-2.0.5/async_lru/__init__.py =====

import asyncio
import dataclasses
import sys
from asyncio.coroutines import _is_coroutine  # type: ignore[attr-defined]
from functools import _CacheInfo, _make_key, partial, partialmethod
from typing import (
    Any,
    Callable,
    Coroutine,
    Generic,
    Hashable,
    Optional,
    OrderedDict,
    Set,
    Type,
    TypedDict,
    TypeVar,
    Union,
    cast,
    final,
    overload,
)


if sys.version_info >= (3, 11):
    from typing import Self
else:
    from typing_extensions import Self


__version__ = "2.0.5"

__all__ = ("alru_cache",)


_T = TypeVar("_T")
_R = TypeVar("_R")
_Coro = Coroutine[Any, Any, _R]
_CB = Callable[..., _Coro[_R]]
_CBP = Union[_CB[_R], "partial[_Coro[_R]]", "partialmethod[_Coro[_R]]"]


@final
class _CacheParameters(TypedDict):
    typed: bool
    maxsize: Optional[int]
    tasks: int
    closed: bool


@final
@dataclasses.dataclass
class _CacheItem(Generic[_R]):
    fut: "asyncio.Future[_R]"
    later_call: Optional[asyncio.Handle]

    def cancel(self) -> None:
        if self.later_call is not None:
            self.later_call.cancel()
            self.later_call = None


@final
class _LRUCacheWrapper(Generic[_R]):
    def __init__(
        self,
        fn: _CB[_R],
        maxsize: Optional[int],
        typed: bool,
        ttl: Optional[float],
    ) -> None:
        try:
            self.__module__ = fn.__module__
        except AttributeError:
            pass
        try:
            self.__name__ = fn.__name__
        except AttributeError:
            pass
        try:
            self.__qualname__ = fn.__qualname__
        except AttributeError:
            pass
        try:
            self.__doc__ = fn.__doc__
        except AttributeError:
            pass
        try:
            self.__annotations__ = fn.__annotations__
        except AttributeError:
            pass
        try:
            self.__dict__.update(fn.__dict__)
        except AttributeError:
            pass
        # set __wrapped__ last so we don't inadvertently copy it
        # from the wrapped function when updating __dict__
        self._is_coroutine = _is_coroutine
        self.__wrapped__ = fn
        self.__maxsize = maxsize
        self.__typed = typed
        self.__ttl = ttl
        self.__cache: OrderedDict[Hashable, _CacheItem[_R]] = OrderedDict()
        self.__closed = False
        self.__hits = 0
        self.__misses = 0
        self.__tasks: Set["asyncio.Task[_R]"] = set()

    def cache_invalidate(self, /, *args: Hashable, **kwargs: Any) -> bool:
        key = _make_key(args, kwargs, self.__typed)

        cache_item = self.__cache.pop(key, None)
        if cache_item is None:
            return False
        else:
            cache_item.cancel()
            return True

    def cache_clear(self) -> None:
        self.__hits = 0
        self.__misses = 0
        for c in self.__cache.values():
            if c.later_call:
                c.later_call.cancel()
        self.__cache.clear()
        self.__tasks.clear()

    async def cache_close(self, *, wait: bool = False) -> None:
        self.__closed = True

        tasks = list(self.__tasks)
        if not tasks:
            return

        if not wait:
            for task in tasks:
                if not task.done():
                    task.cancel()

        await asyncio.gather(*tasks, return_exceptions=True)

    def cache_info(self) -> _CacheInfo:
        return _CacheInfo(
            self.__hits,
            self.__misses,
            self.__maxsize,
            len(self.__cache),
        )

    def cache_parameters(self) -> _CacheParameters:
        return _CacheParameters(
            maxsize=self.__maxsize,
            typed=self.__typed,
            tasks=len(self.__tasks),
            closed=self.__closed,
        )

    def _cache_hit(self, key: Hashable) -> None:
        self.__hits += 1
        self.__cache.move_to_end(key)

    def _cache_miss(self, key: Hashable) -> None:
        self.__misses += 1

    def _task_done_callback(
        self, fut: "asyncio.Future[_R]", key: Hashable, task: "asyncio.Task[_R]"
    ) -> None:
        self.__tasks.discard(task)

        if task.cancelled():
            fut.cancel()
            self.__cache.pop(key, None)
            return

        exc = task.exception()
        if exc is not None:
            fut.set_exception(exc)
            self.__cache.pop(key, None)
            return

        # Success: schedule TTL expiration (if configured), then publish the
        # result through the shared future.
        cache_item = self.__cache.get(key)
        if self.__ttl is not None and cache_item is not None:
            loop = asyncio.get_running_loop()
            cache_item.later_call = loop.call_later(
                self.__ttl, self.__cache.pop, key, None
            )

        fut.set_result(task.result())

    async def __call__(self, /, *fn_args: Any, **fn_kwargs: Any) -> _R:
        if self.__closed:
            raise RuntimeError(f"alru_cache is closed for {self}")

        loop = asyncio.get_running_loop()

        key = _make_key(fn_args, fn_kwargs, self.__typed)

        cache_item = self.__cache.get(key)

        if cache_item is not None:
            self._cache_hit(key)
            if not cache_item.fut.done():
                # Shield the shared future so that cancelling one waiting
                # caller does not cancel the in-flight call for the others.
                return await asyncio.shield(cache_item.fut)

            return cache_item.fut.result()

        fut = loop.create_future()
        coro = self.__wrapped__(*fn_args, **fn_kwargs)
        task: asyncio.Task[_R] = loop.create_task(coro)
        self.__tasks.add(task)
        task.add_done_callback(partial(self._task_done_callback, fut, key))

        self.__cache[key] = _CacheItem(fut, None)

        if self.__maxsize is not None and len(self.__cache) > self.__maxsize:
            # Evict the least recently used entry when over capacity.
            dropped_key, cache_item = self.__cache.popitem(last=False)
            cache_item.cancel()

        self._cache_miss(key)
        return await asyncio.shield(fut)

    def __get__(
        self, instance: _T, owner: Optional[Type[_T]]
    ) -> Union[Self, "_LRUCacheWrapperInstanceMethod[_R, _T]"]:
        if owner is None:
            return self
        else:
            return _LRUCacheWrapperInstanceMethod(self, instance)


@final
class _LRUCacheWrapperInstanceMethod(Generic[_R, _T]):
    def __init__(
        self,
        wrapper: _LRUCacheWrapper[_R],
        instance: _T,
    ) -> None:
        try:
            self.__module__ = wrapper.__module__
        except AttributeError:
            pass
        try:
            self.__name__ = wrapper.__name__
        except AttributeError:
            pass
        try:
            self.__qualname__ = wrapper.__qualname__
        except AttributeError:
            pass
        try:
            self.__doc__ = wrapper.__doc__
        except AttributeError:
            pass
        try:
            self.__annotations__ = wrapper.__annotations__
        except AttributeError:
            pass
        try:
            self.__dict__.update(wrapper.__dict__)
        except AttributeError:
            pass
        # set __wrapped__ last so we don't inadvertently copy it
        # from the wrapped function when updating __dict__
        self._is_coroutine = _is_coroutine
        self.__wrapped__ = wrapper.__wrapped__
        self.__instance = instance
        self.__wrapper = wrapper

    def cache_invalidate(self, /, *args: Hashable, **kwargs: Any) -> bool:
        return self.__wrapper.cache_invalidate(self.__instance, *args, **kwargs)

    def cache_clear(self) -> None:
        self.__wrapper.cache_clear()

    async def cache_close(
        self, *, cancel: bool = False, return_exceptions: bool = True
    ) -> None:
        await self.__wrapper.cache_close()

    def cache_info(self) -> _CacheInfo:
        return self.__wrapper.cache_info()

    def cache_parameters(self) -> _CacheParameters:
        return self.__wrapper.cache_parameters()

    async def __call__(self, /, *fn_args: Any, **fn_kwargs: Any) -> _R:
        return await self.__wrapper(self.__instance, *fn_args, **fn_kwargs)


def _make_wrapper(
    maxsize: Optional[int],
    typed: bool,
    ttl: Optional[float] = None,
) -> Callable[[_CBP[_R]], _LRUCacheWrapper[_R]]:
    def wrapper(fn: _CBP[_R]) -> _LRUCacheWrapper[_R]:
        origin = fn

        while isinstance(origin, (partial, partialmethod)):
            origin = origin.func

        if not asyncio.iscoroutinefunction(origin):
            raise RuntimeError(f"Coroutine function is required, got {fn!r}")

        # functools.partialmethod support
        if hasattr(fn, "_make_unbound_method"):
            fn = fn._make_unbound_method()

        return _LRUCacheWrapper(cast(_CB[_R], fn), maxsize, typed, ttl)

    return wrapper


@overload
def alru_cache(
    maxsize: Optional[int] = 128,
    typed: bool = False,
    *,
    ttl: Optional[float] = None,
) -> Callable[[_CBP[_R]], _LRUCacheWrapper[_R]]: ...


@overload
def alru_cache(
    maxsize: _CBP[_R],
    /,
) -> _LRUCacheWrapper[_R]: ...


def alru_cache(
    maxsize: Union[Optional[int], _CBP[_R]] = 128,
    typed: bool = False,
    *,
    ttl: Optional[float] = None,
) -> Union[Callable[[_CBP[_R]], _LRUCacheWrapper[_R]], _LRUCacheWrapper[_R]]:
    if maxsize is None or isinstance(maxsize, int):
        return _make_wrapper(maxsize, typed, ttl)
    else:
        fn = cast(_CB[_R], maxsize)

        if callable(fn) or hasattr(fn, "_make_unbound_method"):
            return _make_wrapper(128, False, None)(fn)

        raise NotImplementedError(f"{fn!r} decorating is not supported")
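
Because _make_wrapper above unwraps functools.partial and functools.partialmethod
objects before checking that the target is a coroutine function, the decorator can
also be applied to pre-bound callables. The following sketch is illustrative only
(the multiply coroutine is not part of the package); it mirrors the behaviour
exercised in tests/test_basic.py and tests/test_partialmethod.py:

import asyncio
from functools import partial

from async_lru import alru_cache


async def multiply(a: int, b: int) -> int:
    return a * b


# Wrapping a partial: only the remaining arguments form the cache key.
double = alru_cache(partial(multiply, 2))


async def main() -> None:
    assert await double(21) == 42
    assert double.cache_info().misses == 1
    assert await double(21) == 42  # served from the cache
    assert double.cache_info().hits == 1


asyncio.run(main())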
===== async-lru-2.0.5/async_lru/py.typed (empty) =====

===== async-lru-2.0.5/requirements-dev.txt =====

-r requirements.txt
flake8==7.1.2
flake8-bandit==4.1.1
flake8-bugbear==24.12.12
flake8-import-order==0.18.2
flake8-requirements==2.2.1
mypy==1.15.0; implementation_name=="cpython"

===== async-lru-2.0.5/requirements.txt =====

-e .
coverage==7.6.12
pytest==8.3.5
pytest-asyncio==0.25.3
pytest-cov==6.0.0
pytest-timeout==2.3.1

===== async-lru-2.0.5/setup.cfg =====

[metadata]
name = async-lru
version = attr: async_lru.__version__
url = https://github.com/aio-libs/async-lru
project_urls =
    Chat: Matrix = https://matrix.to/#/#aio-libs:matrix.org
    Chat: Matrix Space = https://matrix.to/#/#aio-libs-space:matrix.org
    CI: GitHub Actions = https://github.com/aio-libs/async-lru/actions
    GitHub: repo = https://github.com/aio-libs/async-lru
description = Simple LRU cache for asyncio
long_description = file: README.rst
long_description_content_type = text/x-rst
maintainer = aiohttp team
maintainer_email = team@aiohttp.org
license = MIT License
license_files = LICENSE
classifiers =
    License :: OSI Approved :: MIT License
    Intended Audience :: Developers
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3 :: Only
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Programming Language :: Python :: 3.11
    Programming Language :: Python :: 3.12
    Programming Language :: Python :: 3.13
    Development Status :: 5 - Production/Stable
    Framework :: AsyncIO
keywords = asyncio lru lru_cache

[options]
python_requires = >=3.9
packages = find:
install_requires =
    typing_extensions>=4.0.0; python_version<"3.11"

[options.package_data]
* = py.typed

[flake8]
exclude = .git,.env,__pycache__,.eggs
max-line-length = 88
extend-select = B950
ignore = N801,N802,N803,E252,W503,E133,E203,E501

[coverage:run]
branch = True
omit = site-packages

[isort]
line_length=88
include_trailing_comma=True
multi_line_output=3
force_grid_wrap=0
combine_as_imports=True
lines_after_imports=2
known_first_party=async_lru

[tool:pytest]
addopts= -s --keep-duplicates --cache-clear --verbose --no-cov-on-fail --cov=async_lru --cov=tests/ --cov-report=term --cov-report=html
filterwarnings =
    error
testpaths = tests/
junit_family=xunit2
asyncio_mode=auto
timeout=15
xfail_strict = true

[mypy]
strict=True
pretty=True
packages=async_lru, tests

===== async-lru-2.0.5/setup.py =====

from setuptools import setup

setup()

===== async-lru-2.0.5/tests/conftest.py =====

from functools import _CacheInfo
from typing import Callable

import pytest

from async_lru import _R, _LRUCacheWrapper


@pytest.fixture
def check_lru() -> Callable[..., None]:
    def _check_lru(
        wrapped: _LRUCacheWrapper[_R],
        *,
        hits: int,
        misses: int,
        cache: int,
        tasks: int,
        maxsize: int = 128
    ) -> None:
        assert wrapped.cache_info() == _CacheInfo(
            hits=hits,
            misses=misses,
            maxsize=maxsize,
            currsize=cache,
        )
        assert wrapped.cache_parameters()["tasks"] == tasks

    return _check_lru

===== async-lru-2.0.5/tests/test_basic.py =====

import asyncio
import platform
import sys
from functools import _CacheInfo, partial
from typing import Callable

import pytest

from async_lru import _CacheParameters, alru_cache


def test_alru_cache_not_callable() -> None:
    with pytest.raises(NotImplementedError):
        alru_cache("foo")  # type: ignore[call-overload]


def test_alru_cache_not_coroutine() -> None:
    with pytest.raises(RuntimeError):

        @alru_cache  # type: ignore[arg-type]
        def not_coro(val: int) -> int:
            return val


async def test_alru_cache_deco(check_lru: Callable[..., None]) -> None:
    @alru_cache
    async def coro() -> None:
        pass

    assert asyncio.iscoroutinefunction(coro)

    check_lru(coro, hits=0, misses=0, cache=0, tasks=0)

    awaitable = coro()

    assert asyncio.iscoroutine(awaitable)

    await awaitable


async def test_alru_cache_deco_called(check_lru: Callable[..., None]) -> None:
    @alru_cache()
    async def coro() -> None:
        pass

    assert asyncio.iscoroutinefunction(coro)

    check_lru(coro, hits=0, misses=0, cache=0, tasks=0)

    awaitable = coro()

    assert asyncio.iscoroutine(awaitable)

    await awaitable


async def test_alru_cache_fn_called(check_lru: Callable[..., None]) -> None:
    async def coro() -> None:
        pass

    coro_wrapped = alru_cache(coro)

    assert asyncio.iscoroutinefunction(coro_wrapped)

    check_lru(coro_wrapped, hits=0, misses=0, cache=0, tasks=0)

    awaitable = coro_wrapped()

    assert asyncio.iscoroutine(awaitable)

    await awaitable


async def test_alru_cache_partial() -> None:
    async def coro(val: int) -> int:
        return val

    coro_wrapped1 = alru_cache(coro)
    assert await coro_wrapped1(1) == 1

    coro_wrapped2 = alru_cache(partial(coro, 2))
    assert await coro_wrapped2() == 2


async def test_alru_cache_await_same_result_async(
    check_lru: Callable[..., None]
) -> None:
    calls = 0
    val = object()

    @alru_cache()
    async def coro() -> object:
        nonlocal calls
        calls += 1
        return val

    coros = [coro() for _ in range(100)]
    ret = await asyncio.gather(*coros)

    expected = [val] * 100

    assert ret == expected
    check_lru(coro, hits=99, misses=1, cache=1, tasks=0)
    assert calls == 1
    assert await coro() is val
    check_lru(coro, hits=100, misses=1, cache=1, tasks=0)


async def test_alru_cache_await_same_result_coroutine(
    check_lru: Callable[..., None]
) -> None:
    calls = 0
    val = object()

    @alru_cache()
    async def coro() -> object:
        nonlocal calls
        calls += 1
        return val

    coros = [coro() for _ in range(100)]
    ret = await asyncio.gather(*coros)

    expected = [val] * 100

    assert ret == expected
    check_lru(coro, hits=99, misses=1, cache=1, tasks=0)
    assert calls == 1
    assert await coro() is val
    check_lru(coro, hits=100, misses=1, cache=1, tasks=0)
async def test_alru_cache_dict_not_shared(check_lru: Callable[..., None]) -> None:
    async def coro(val: int) -> int:
        return val

    coro1 = alru_cache()(coro)
    coro2 = alru_cache()(coro)

    ret1 = await coro1(1)
    check_lru(coro1, hits=0, misses=1, cache=1, tasks=0)

    ret2 = await coro2(1)
    check_lru(coro2, hits=0, misses=1, cache=1, tasks=0)

    assert ret1 == ret2
    assert (
        coro1._LRUCacheWrapper__cache[1].fut.result()  # type: ignore[attr-defined]
        == coro2._LRUCacheWrapper__cache[1].fut.result()  # type: ignore[attr-defined]
    )
    assert coro1._LRUCacheWrapper__cache != coro2._LRUCacheWrapper__cache  # type: ignore[attr-defined]
    assert coro1._LRUCacheWrapper__cache.keys() == coro2._LRUCacheWrapper__cache.keys()  # type: ignore[attr-defined]
    assert coro1._LRUCacheWrapper__cache is not coro2._LRUCacheWrapper__cache  # type: ignore[attr-defined]


async def test_alru_cache_parameters() -> None:
    @alru_cache
    async def coro(val: int) -> int:
        return val

    assert coro.cache_parameters() == _CacheParameters(
        typed=False,
        maxsize=128,
        tasks=0,
        closed=False,
    )
    await coro(1)
    assert coro.cache_parameters() == _CacheParameters(
        typed=False,
        maxsize=128,
        tasks=0,
        closed=False,
    )


async def test_alru_cache_method() -> None:
    class A:
        def __init__(self, val: int) -> None:
            self.val = val

        @alru_cache
        async def coro(self) -> int:
            return self.val

    a = A(42)
    assert await a.coro() == 42
    assert a.coro.cache_parameters() == _CacheParameters(
        typed=False,
        maxsize=128,
        tasks=0,
        closed=False,
    )


@pytest.mark.xfail(
    sys.version_info[:2] == (3, 9) and platform.python_implementation() != "PyPy",
    reason="#511",
)
async def test_alru_cache_classmethod() -> None:
    class A:
        offset = 3

        @classmethod
        @alru_cache
        async def coro(cls, val: int) -> int:
            return val + cls.offset

    assert await A.coro(5) == 8
    assert A.coro.cache_parameters() == _CacheParameters(
        typed=False,
        maxsize=128,
        tasks=0,
        closed=False,
    )


async def test_invalidate_cache_for_method() -> None:
    class A:
        @alru_cache
        async def coro(self, val: int) -> int:
            return val

    a = A()
    assert await a.coro(42) == 42
    assert a.coro.cache_info() == _CacheInfo(0, 1, 128, 1)

    a.coro.cache_invalidate(42)
    assert a.coro.cache_info() == _CacheInfo(0, 1, 128, 0)

===== async-lru-2.0.5/tests/test_cache_clear.py =====

import asyncio
from typing import Callable

from async_lru import alru_cache


async def test_cache_clear(check_lru: Callable[..., None]) -> None:
    @alru_cache()
    async def coro(val: int) -> int:
        return val

    inputs = [1, 2, 3]
    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)
    assert ret == inputs

    check_lru(coro, hits=0, misses=3, cache=3, tasks=0)

    coro.cache_clear()

    check_lru(coro, hits=0, misses=0, cache=0, tasks=0)


async def test_cache_clear_pending_task() -> None:
    @alru_cache()
    async def coro() -> str:
        await asyncio.sleep(0.5)
        return "foo"

    t = asyncio.create_task(coro())
    await asyncio.sleep(0)
    assert len(coro._LRUCacheWrapper__tasks) == 1  # type: ignore[attr-defined]
    inner_task = next(iter(coro._LRUCacheWrapper__tasks))  # type: ignore[attr-defined]
    assert not inner_task.done()

    coro.cache_clear()
    await inner_task
    assert await t == "foo"
    assert inner_task.done()


async def test_cache_clear_ttl_callback(check_lru: Callable[..., None]) -> None:
    @alru_cache(ttl=0.5)
    async def coro() -> str:
        return "foo"

    await coro()
    assert len(coro._LRUCacheWrapper__cache) == 1  # type: ignore[attr-defined]
    cache_item = next(iter(coro._LRUCacheWrapper__cache.values()))  # type: ignore[attr-defined]
    assert not cache_item.later_call.cancelled()

    coro.cache_clear()
    assert cache_item.later_call.cancelled()
    await asyncio.sleep(0.5)
===== async-lru-2.0.5/tests/test_cache_info.py =====

import asyncio
from typing import Callable

from async_lru import alru_cache


async def test_cache_info(check_lru: Callable[..., None]) -> None:
    @alru_cache(maxsize=4)
    async def coro(val: int) -> int:
        return val

    inputs = [1, 2, 3]
    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)
    assert ret == inputs

    check_lru(coro, hits=0, misses=3, cache=3, tasks=0, maxsize=4)

    coro.cache_clear()
    check_lru(coro, hits=0, misses=0, cache=0, tasks=0, maxsize=4)

    inputs = [1, 1, 1]
    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)
    assert ret == inputs

    check_lru(coro, hits=2, misses=1, cache=1, tasks=0, maxsize=4)

    coro.cache_clear()
    check_lru(coro, hits=0, misses=0, cache=0, tasks=0, maxsize=4)

    inputs = [1, 2, 3, 4] * 2
    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)
    assert ret == inputs

    check_lru(coro, hits=4, misses=4, cache=4, tasks=0, maxsize=4)

===== async-lru-2.0.5/tests/test_cache_invalidate.py =====

import asyncio
from typing import Callable

from async_lru import alru_cache


async def test_cache_invalidate(check_lru: Callable[..., None]) -> None:
    @alru_cache()
    async def coro(val: int) -> int:
        return val

    inputs = [1, 2, 3]

    coro.cache_invalidate(1)
    coro.cache_invalidate(2)
    coro.cache_invalidate(3)

    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)
    assert ret == inputs

    check_lru(coro, hits=0, misses=3, cache=3, tasks=0)

    coro.cache_invalidate(1)
    check_lru(coro, hits=0, misses=3, cache=2, tasks=0)
    coro.cache_invalidate(2)
    check_lru(coro, hits=0, misses=3, cache=1, tasks=0)
    coro.cache_invalidate(3)
    check_lru(coro, hits=0, misses=3, cache=0, tasks=0)

    inputs = [1, 2, 3]
    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)
    assert ret == inputs

    check_lru(coro, hits=0, misses=6, cache=3, tasks=0)


async def test_cache_invalidate_multiple_args(check_lru: Callable[..., None]) -> None:
    @alru_cache()
    async def coro(*args: int) -> int:
        return len(args)

    for i, size in enumerate(range(10)):
        args = tuple(range(size))
        ret = await coro(*args)
        assert ret == size
        check_lru(coro, hits=0, misses=i + 1, cache=1, tasks=0)

        coro.cache_invalidate(*args)
        check_lru(coro, hits=0, misses=i + 1, cache=0, tasks=0)

    for size in range(10):
        args = tuple(range(size))
        ret = await coro(*args)
        assert ret == size

    check_lru(coro, hits=0, misses=20, cache=10, tasks=0)


async def test_cache_invalidate_multiple_args_different_order(
    check_lru: Callable[..., None]
) -> None:
    @alru_cache()
    async def coro(*args: int) -> int:
        return len(args)

    for i, size in enumerate(range(2, 10)):
        args = tuple(range(size))
        rev_args = tuple(reversed(args))
        ret = await coro(*args)
        assert ret == size
        check_lru(coro, hits=0, misses=2 * i + 1, cache=i + 1, tasks=0)

        ret = await coro(*rev_args)
        # The reversed args should be a miss
        check_lru(coro, hits=0, misses=2 * i + 2, cache=i + 2, tasks=0)

        coro.cache_invalidate(*rev_args)
        # The reversed args should be invalidated
        check_lru(coro, hits=0, misses=2 * i + 2, cache=i + 1, tasks=0)

    for i, size in enumerate(range(2, 10)):
        args = tuple(range(size))
        ret = await coro(*args)
        assert ret == size
        check_lru(coro, hits=i + 1, misses=16, cache=8, tasks=0)

===== async-lru-2.0.5/tests/test_close.py =====

import asyncio
from typing import Callable

import pytest

from async_lru import alru_cache
async def test_cache_close(check_lru: Callable[..., None]) -> None:
    @alru_cache()
    async def coro(val: int) -> int:
        await asyncio.sleep(0.2)
        return val

    assert not coro.cache_parameters()["closed"]

    inputs = [1, 2, 3, 4, 5]
    coros = [coro(v) for v in inputs]
    gather = asyncio.gather(*coros)
    await asyncio.sleep(0.1)

    check_lru(coro, hits=0, misses=5, cache=5, tasks=5)

    close = coro.cache_close()

    check_lru(coro, hits=0, misses=5, cache=5, tasks=5)

    await close

    check_lru(coro, hits=0, misses=5, cache=0, tasks=0)

    assert coro.cache_parameters()["closed"]

    with pytest.raises(asyncio.CancelledError):
        await gather

    check_lru(coro, hits=0, misses=5, cache=0, tasks=0)

    assert coro.cache_parameters()["closed"]

    # double call is no-op
    await coro.cache_close()

===== async-lru-2.0.5/tests/test_exception.py =====

import asyncio
import gc
import sys
from typing import Callable

import pytest

from async_lru import alru_cache


async def test_alru_exception(check_lru: Callable[..., None]) -> None:
    @alru_cache()
    async def coro(val: int) -> None:
        1 / 0

    inputs = [1, 1, 1]
    coros = [coro(v) for v in inputs]

    ret = await asyncio.gather(*coros, return_exceptions=True)

    check_lru(coro, hits=2, misses=1, cache=0, tasks=0)

    for item in ret:
        assert isinstance(item, ZeroDivisionError)

    with pytest.raises(ZeroDivisionError):
        await coro(1)

    check_lru(coro, hits=2, misses=2, cache=0, tasks=0)


@pytest.mark.xfail(
    reason="Memory leak is not fixed for PyPy3.9",
    condition=sys.implementation.name == "pypy",
)
async def test_alru_exception_reference_cleanup(check_lru: Callable[..., None]) -> None:
    class CustomClass: ...

    @alru_cache()
    async def coro(val: int) -> None:
        _ = CustomClass()  # object we are verifying not to leak
        1 / 0

    coros = [coro(v) for v in range(1000)]
    await asyncio.gather(*coros, return_exceptions=True)

    check_lru(coro, hits=0, misses=1000, cache=0, tasks=0)

    await asyncio.sleep(0.00001)
    gc.collect()

    assert (
        len([obj for obj in gc.get_objects() if isinstance(obj, CustomClass)]) == 0
    ), "Only objects in the cache should be left in memory."
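
The exception tests above depend on failed calls being evicted from the cache, so a
later call with the same arguments runs the wrapped coroutine again. A minimal
standalone sketch of that behaviour (the flaky coroutine and its counter are
illustrative, not part of the package):

import asyncio

from async_lru import alru_cache

attempts = 0


@alru_cache()
async def flaky(val: int) -> int:
    global attempts
    attempts += 1
    if attempts == 1:
        raise RuntimeError("first call fails")
    return val


async def main() -> None:
    try:
        await flaky(1)
    except RuntimeError:
        pass
    # The failure was not cached, so the retry executes the coroutine again.
    assert await flaky(1) == 1
    assert attempts == 2


asyncio.run(main())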
===== async-lru-2.0.5/tests/test_internals.py =====

import asyncio
from functools import partial
from unittest import mock

import pytest

from async_lru import _LRUCacheWrapper


async def test_done_callback_cancelled() -> None:
    wrapped = _LRUCacheWrapper(mock.ANY, None, False, None)
    loop = asyncio.get_running_loop()
    task = loop.create_future()
    fut = loop.create_future()
    key = 1

    task.add_done_callback(partial(wrapped._task_done_callback, fut, key))
    wrapped._LRUCacheWrapper__tasks.add(task)  # type: ignore[attr-defined]

    task.cancel()

    await asyncio.sleep(0)

    assert fut.cancelled()


async def test_done_callback_exception() -> None:
    wrapped = _LRUCacheWrapper(mock.ANY, None, False, None)
    loop = asyncio.get_running_loop()
    task = loop.create_future()
    fut = loop.create_future()
    key = 1

    task.add_done_callback(partial(wrapped._task_done_callback, fut, key))
    wrapped._LRUCacheWrapper__tasks.add(task)  # type: ignore[attr-defined]

    exc = ZeroDivisionError()
    task.set_exception(exc)

    await asyncio.sleep(0)

    with pytest.raises(ZeroDivisionError):
        await fut

    with pytest.raises(ZeroDivisionError):
        fut.result()

    assert fut.exception() is exc


async def test_done_callback() -> None:
    wrapped = _LRUCacheWrapper(mock.ANY, None, False, None)
    loop = asyncio.get_running_loop()
    task = loop.create_future()
    key = 1
    fut = loop.create_future()

    task.add_done_callback(partial(wrapped._task_done_callback, fut, key))
    wrapped._LRUCacheWrapper__tasks.add(task)  # type: ignore[attr-defined]

    task.set_result(1)

    await asyncio.sleep(0)

    assert fut.result() == 1


async def test_cache_invalidate_typed() -> None:
    wrapped = _LRUCacheWrapper(mock.AsyncMock(return_value=1), None, True, None)

    from_cache = wrapped.cache_invalidate(1, a=1)
    assert not from_cache

    await wrapped(1, a=1)

    from_cache = wrapped.cache_invalidate(1, a=1)
    assert from_cache
    assert wrapped.cache_info().currsize == 0

    from_cache = wrapped.cache_invalidate(1.0, a=1)
    assert not from_cache
    assert wrapped.cache_info().currsize == 0

    await wrapped(1.0, a=1)
    assert wrapped.cache_info().currsize == 1

    from_cache = wrapped.cache_invalidate(1.0, a=1)
    assert from_cache


async def test_cache_invalidate_not_typed() -> None:
    wrapped = _LRUCacheWrapper(mock.AsyncMock(return_value=1), None, False, None)

    from_cache = wrapped.cache_invalidate(1, a=1)
    assert not from_cache

    await wrapped(1, a=1)
    assert wrapped.cache_info().currsize == 1

    from_cache = wrapped.cache_invalidate(1, a=1)
    assert from_cache
    assert wrapped.cache_info().currsize == 0

    await wrapped(1, a=1)
    assert wrapped.cache_info().currsize == 1

    from_cache = wrapped.cache_invalidate(1.0, a=1)
    assert from_cache
    assert wrapped.cache_info().currsize == 0


async def test_cache_clear() -> None:
    wrapped = _LRUCacheWrapper(mock.AsyncMock(return_value=1), None, True, None)

    await wrapped(123)
    assert wrapped.cache_info().hits == 0
    assert wrapped.cache_info().misses == 1
    assert wrapped.cache_info().currsize == 1
    assert wrapped.cache_parameters()["tasks"] == 0

    await wrapped(123)
    assert wrapped.cache_info().hits == 1
    assert wrapped.cache_info().misses == 1
    assert wrapped.cache_info().currsize == 1
    assert wrapped.cache_parameters()["tasks"] == 0

    wrapped.cache_clear()
    assert wrapped.cache_info().hits == 0
    assert wrapped.cache_info().misses == 0
    assert wrapped.cache_info().currsize == 0
    assert wrapped.cache_parameters()["tasks"] == 0


def test_cache_info() -> None:
    wrapped = _LRUCacheWrapper(mock.ANY, 3, True, None)
    assert (0, 0, 3, 0) == wrapped.cache_info()

    wrapped._LRUCacheWrapper__cache[1] = 1  # type: ignore[attr-defined]
    assert (0, 0, 3, 1) == wrapped.cache_info()

    wrapped._LRUCacheWrapper__hits = 2  # type: ignore[attr-defined]
    wrapped._LRUCacheWrapper__misses = 3  # type: ignore[attr-defined]
    wrapped._LRUCacheWrapper__cache[2] = 2  # type: ignore[attr-defined]
    assert (2, 3, 3, 2) == wrapped.cache_info()


async def test_cache_hit() -> None:
    wrapped = _LRUCacheWrapper(mock.AsyncMock(return_value=1), None, True, None)

    await wrapped(1)
    assert wrapped.cache_info().hits == 0
    assert wrapped.cache_info().misses == 1

    await wrapped(1)
    assert wrapped.cache_info().hits == 1
    assert wrapped.cache_info().misses == 1

    await wrapped(1)
    assert wrapped.cache_info().hits == 2
    assert wrapped.cache_info().misses == 1


async def test_cache_miss() -> None:
    wrapped = _LRUCacheWrapper(mock.AsyncMock(return_value=1), None, True, None)

    await wrapped(1)
    assert wrapped.cache_info().hits == 0
    assert wrapped.cache_info().misses == 1

    await wrapped(2)
    assert wrapped.cache_info().hits == 0
    assert wrapped.cache_info().misses == 2

    await wrapped(3)
    assert wrapped.cache_info().hits == 0
    assert wrapped.cache_info().misses == 3


async def test_forbid_call_closed() -> None:
    wrapped = _LRUCacheWrapper(mock.AsyncMock(return_value=1), None, True, None)
    wrapped._LRUCacheWrapper__closed = True  # type: ignore[attr-defined]

    with pytest.raises(RuntimeError):
        await wrapped(123)

===== async-lru-2.0.5/tests/test_partialmethod.py =====

import asyncio
from functools import partial, partialmethod
from typing import Callable

from async_lru import alru_cache


async def test_partialmethod_basic(check_lru: Callable[..., None]) -> None:
    class Obj:
        async def _coro(self, val: int) -> int:
            return val

        coro = alru_cache(partialmethod(_coro, 2))

    obj = Obj()

    coros = [obj.coro() for _ in range(5)]
    check_lru(obj.coro, hits=0, misses=0, cache=0, tasks=0)

    ret = await asyncio.gather(*coros)
    check_lru(obj.coro, hits=4, misses=1, cache=1, tasks=0)

    assert ret == [2, 2, 2, 2, 2]


async def test_partialmethod_partial(check_lru: Callable[..., None]) -> None:
    class Obj:
        def __init__(self) -> None:
            self.coro = alru_cache(partial(self._coro, 2))

        async def __coro(self, val1: int, val2: int) -> int:
            return val1 + val2

        _coro = partialmethod(__coro, 1)

    obj = Obj()

    coros = [obj.coro() for _ in range(5)]
    check_lru(obj.coro, hits=0, misses=0, cache=0, tasks=0)

    ret = await asyncio.gather(*coros)
    check_lru(obj.coro, hits=4, misses=1, cache=1, tasks=0)

    assert ret == [3, 3, 3, 3, 3]

===== async-lru-2.0.5/tests/test_size.py =====

import asyncio
from typing import Callable

from async_lru import alru_cache


async def test_alru_cache_removing_lru_keys(check_lru: Callable[..., None]) -> None:
    @alru_cache(maxsize=3)
    async def coro(val: int) -> int:
        return val

    for i, v in enumerate([3, 4, 5]):
        await coro(v)
        check_lru(coro, hits=0, misses=i + 1, cache=i + 1, tasks=0, maxsize=3)

    check_lru(coro, hits=0, misses=3, cache=3, tasks=0, maxsize=3)
    assert list(coro._LRUCacheWrapper__cache) == [3, 4, 5]  # type: ignore[attr-defined]

    for v in [3, 2, 1]:
        await coro(v)

    check_lru(coro, hits=1, misses=5, cache=3, tasks=0, maxsize=3)
    assert list(coro._LRUCacheWrapper__cache) == [3, 2, 1]  # type: ignore[attr-defined]


async def test_alru_cache_removing_lru_keys_with_full_displacement(
    check_lru: Callable[..., None]
) -> None:
    @alru_cache(maxsize=3)
    async def coro(val: int) -> int:
        return val

    for i, v in enumerate([3, 4, 5]):
        await coro(v)
        check_lru(coro, hits=0, misses=i + 1, cache=i + 1, tasks=0, maxsize=3)
    check_lru(coro, hits=0, misses=3, cache=3, tasks=0, maxsize=3)
    assert list(coro._LRUCacheWrapper__cache) == [3, 4, 5]  # type: ignore[attr-defined]

    for v in [1, 2, 3]:
        await coro(v)

    check_lru(coro, hits=0, misses=6, cache=3, tasks=0, maxsize=3)
    assert list(coro._LRUCacheWrapper__cache) == [1, 2, 3]  # type: ignore[attr-defined]


async def test_alru_cache_none_max_size(check_lru: Callable[..., None]) -> None:
    @alru_cache(maxsize=None)
    async def coro(val: int) -> int:
        return val

    inputs = [1, 2, 3, 4] * 2
    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)

    check_lru(coro, hits=4, misses=4, cache=4, tasks=0, maxsize=None)
    assert ret == inputs


async def test_alru_cache_zero_max_size(check_lru: Callable[..., None]) -> None:
    @alru_cache(maxsize=0)
    async def coro(val: int) -> int:
        return val

    inputs = [1, 2, 3, 4] * 2
    coros = [coro(v) for v in inputs]
    ret = await asyncio.gather(*coros)

    check_lru(coro, hits=0, misses=8, cache=0, tasks=0, maxsize=0)
    assert ret == inputs

===== async-lru-2.0.5/tests/test_ttl.py =====

import asyncio
from typing import Callable

from async_lru import alru_cache


async def test_ttl_infinite_cache(check_lru: Callable[..., None]) -> None:
    @alru_cache(maxsize=None, ttl=0.1)
    async def coro(val: int) -> int:
        return val

    assert await coro(1) == 1
    check_lru(coro, hits=0, misses=1, cache=1, tasks=0, maxsize=None)

    await asyncio.sleep(0.0)

    assert await coro(1) == 1
    check_lru(coro, hits=1, misses=1, cache=1, tasks=0, maxsize=None)

    await asyncio.sleep(0.2)

    # the cache is cleared after the ttl expires
    check_lru(coro, hits=1, misses=1, cache=0, tasks=0, maxsize=None)

    assert await coro(1) == 1
    check_lru(coro, hits=1, misses=2, cache=1, tasks=0, maxsize=None)


async def test_ttl_limited_cache(check_lru: Callable[..., None]) -> None:
    @alru_cache(maxsize=1, ttl=0.1)
    async def coro(val: int) -> int:
        return val

    assert await coro(1) == 1
    check_lru(coro, hits=0, misses=1, cache=1, tasks=0, maxsize=1)

    assert await coro(2) == 2
    check_lru(coro, hits=0, misses=2, cache=1, tasks=0, maxsize=1)

    await asyncio.sleep(0)

    assert await coro(2) == 2
    check_lru(coro, hits=1, misses=2, cache=1, tasks=0, maxsize=1)

    assert await coro(1) == 1
    check_lru(coro, hits=1, misses=3, cache=1, tasks=0, maxsize=1)


async def test_ttl_with_explicit_invalidation(check_lru: Callable[..., None]) -> None:
    @alru_cache(maxsize=None, ttl=0.2)
    async def coro(val: int) -> int:
        return val

    assert await coro(1) == 1
    check_lru(coro, hits=0, misses=1, cache=1, tasks=0, maxsize=None)

    coro.cache_invalidate(1)
    check_lru(coro, hits=0, misses=1, cache=0, tasks=0, maxsize=None)

    await asyncio.sleep(0.1)

    assert await coro(1) == 1
    check_lru(coro, hits=0, misses=2, cache=1, tasks=0, maxsize=None)

    await asyncio.sleep(0.1)

    # the entry is still cached after the original ttl has elapsed because
    # cache_invalidate() also cancelled the scheduled expiration callback
    check_lru(coro, hits=0, misses=2, cache=1, tasks=0, maxsize=None)


async def test_ttl_concurrent() -> None:
    @alru_cache(maxsize=1, ttl=1)
    async def coro(val: int) -> int:
        return val

    results = await asyncio.gather(*(coro(i) for i in range(2)))
    assert results == list(range(2))

===== async-lru-2.0.5/tox.ini =====

[tox]
envlist = py3{8,9,10,11}
skip_missing_interpreters = True

[testenv]
deps =
    -r{toxinidir}/requirements.txt
commands =
    flake8 --show-source async_lru
    isort --check-only async_lru --diff
    flake8 --show-source tests
    isort --check-only -rc tests --diff
    {envpython} -m pytest
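
Taken together, the module and tests above cover the public surface of the package:
decorating plain coroutines or methods, inspecting the cache, invalidating individual
entries, and closing the cache on shutdown. A closing sketch of that surface (the
Repository class and its load method are illustrative, not part of the distribution):

import asyncio

from async_lru import alru_cache


class Repository:
    @alru_cache(maxsize=64, ttl=30)
    async def load(self, item_id: int) -> dict:
        # Stand-in for an awaited database or HTTP lookup.
        return {"id": item_id}


async def main() -> None:
    repo = Repository()
    await repo.load(1)
    await repo.load(1)  # cache hit
    print(repo.load.cache_info())  # CacheInfo(hits=1, misses=1, maxsize=64, currsize=1)
    repo.load.cache_invalidate(1)  # drop a single entry
    await repo.load.cache_close()  # recommended before shutdown


asyncio.run(main())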