pax_global_header00006660000000000000000000000064147651713270014527gustar00rootroot0000000000000052 comment=a9d5a1a8a9a7dd9686861117364a3d4cd786cd5b limits-4.4.1/000077500000000000000000000000001476517132700130365ustar00rootroot00000000000000limits-4.4.1/.coveragerc000066400000000000000000000004271476517132700151620ustar00rootroot00000000000000[run] omit = /**/limits/_version* /**/tests/* setup.py versioneer.py [report] exclude_lines = pragma: no cover noqa raise NotImplementedError @overload @abstractmethod if TYPE_CHECKING if typing.TYPE_CHECKING class .*\bProtocol\): limits-4.4.1/.devcontainer/000077500000000000000000000000001476517132700155755ustar00rootroot00000000000000limits-4.4.1/.devcontainer/devcontainer.json000066400000000000000000000023241476517132700211520ustar00rootroot00000000000000// For format details, see https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/python { "name": "Python 3", // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", "workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind", "workspaceFolder": "/workspace", // Features to add to the dev container. More info: https://containers.dev/features. "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {} }, "mounts": [ "type=bind,source=${localEnv:HOME}${localEnv:USERPROFILE}/.ssh,target=${localEnv:HOME}${localEnv:USERPROFILE}/.ssh,readonly" ], // Use 'forwardPorts' to make a list of ports inside the container available locally. // "forwardPorts": [], // Use 'postCreateCommand' to run commands after the container is created. "postCreateCommand": "pip install --no-binary protobuf -r requirements/ci.txt" // Configure tool-specific properties. // "customizations": {}, // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. // "remoteUser": "root" } limits-4.4.1/.gitattributes000066400000000000000000000001121476517132700157230ustar00rootroot00000000000000flask_ratelimits/_version.py export-subst limits/_version.py export-subst limits-4.4.1/.github/000077500000000000000000000000001476517132700143765ustar00rootroot00000000000000limits-4.4.1/.github/FUNDING.yml000066400000000000000000000000521476517132700162100ustar00rootroot00000000000000github: alisaifee open_collective: limits limits-4.4.1/.github/ISSUE_TEMPLATE/000077500000000000000000000000001476517132700165615ustar00rootroot00000000000000limits-4.4.1/.github/ISSUE_TEMPLATE/bug-report.md000066400000000000000000000012501476517132700211670ustar00rootroot00000000000000--- name: Bug Report about: Submit a bug report labels: 'bug' --- ## Expected Behaviour ## Current Behaviour ## Steps to Reproduce 1. 1. 1. 1. ## Your Environment - limits version: - Operating system: limits-4.4.1/.github/ISSUE_TEMPLATE/feature.md000066400000000000000000000005131476517132700205350ustar00rootroot00000000000000--- name: Feature or Enhancement about: Propose a new feature or enhancement labels: 'enhancement' --- ## Expected Behaviour limits-4.4.1/.github/dependabot.yml000066400000000000000000000006551476517132700172340ustar00rootroot00000000000000# To get started with Dependabot version updates, you'll need to specify which # package ecosystems to update and where the package manifests are located. 
# Please see the documentation for all configuration options: # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates version: 2 updates: - package-ecosystem: "pip" directory: "/" schedule: interval: "daily" limits-4.4.1/.github/workflows/000077500000000000000000000000001476517132700164335ustar00rootroot00000000000000limits-4.4.1/.github/workflows/compatibility.yml000066400000000000000000000076251476517132700220410ustar00rootroot00000000000000name: Compatibility on: create: tags: ['*'] push: branches: ['master'] schedule: - cron: 0 23 * * * jobs: test: runs-on: ubuntu-latest strategy: fail-fast: false matrix: service_version: ["ALL_LATEST=true"] async_redis_implementation: ["coredis"] marker: [not integration] include: - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=7.4" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=7.2" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=7.0" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=6.2.6" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_VERSION=7.4" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_VERSION=7.4" async_redis_implementation: "redispy" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_VERSION=7.2" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_VERSION=7.0" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SERVER_VERSION=6.2.6" marker: "redis or redis_cluster" - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=7.2" marker: "redis_sentinel" - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=7.2" marker: "redis_sentinel" async_redis_implementation: "redispy" - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=7.0" marker: "redis_sentinel" - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=6.2.6" marker: "redis_sentinel" - service_version: "LIMITS_MONGODB_SERVER_VERSION=8.0" marker: "mongodb" - service_version: "LIMITS_MONGODB_SERVER_VERSION=7.0" marker: "mongodb" - service_version: "LIMITS_MONGODB_SERVER_VERSION=6.0" marker: "mongodb" - service_version: "LIMITS_MONGODB_SERVER_VERSION=5.0" marker: "mongodb" - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.6.15" marker: "memcached" - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.6.6" marker: "memcached" - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.5.16" marker: "memcached" - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.4.34" marker: "memcached" - service_version: "LIMITS_ETCD_SERVER_VERSION=3.5" marker: "etcd" - service_version: "LIMITS_ETCD_SERVER_VERSION=3.4" marker: "etcd" - service_version: "LIMITS_ETCD_SERVER_VERSION=3.3" marker: "etcd" - service_version: "LIMITS_VALKEY_SERVER_VERSION=7.2" marker: "valkey" - service_version: "LIMITS_VALKEY_SERVER_VERSION=8.0" marker: "valkey" steps: - uses: actions/checkout@v4 - name: Cache dependencies uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }} restore-keys: | ${{ runner.os }}-pip- - name: Set up Python uses: actions/setup-python@v5 with: python-version: "3.12" - name: Install dependencies run: | python -m pip install -U pip python -m pip install -U setuptools wheel pip install --no-binary protobuf -r requirements/ci.txt - name: Tests env: CI: "True" ASYNC_REDIS_IMPLEMENTATION: "${{ matrix.async_redis_implementation }}" run: | eval "export ${{ matrix.service_version 
}}" py.test -m "${{ matrix.marker }} and not benchmark" --cov-report=xml --cov-branch --max-runs=3 limits-4.4.1/.github/workflows/main.yml000066400000000000000000000155261476517132700201130ustar00rootroot00000000000000name: CI on: [push, pull_request] jobs: lint: runs-on: ubuntu-latest strategy: matrix: python-version: ["3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v4 - name: Cache dependencies uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }} restore-keys: | ${{ runner.os }}-pip- - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install -U pip python -m pip install -U setuptools wheel pip install --no-binary protobuf -r requirements/ci.txt - name: Lint with ruff run: | ruff check --select I limits tests ruff format --check limits tests ruff check limits tests - name: Check types run: | mypy limits test: strategy: fail-fast: false matrix: python-version: ["3.10"] marker: [not (integration or benchmark)] os: [ubuntu-latest] async_redis_implementation: ["coredis"] include: - python-version: "3.10" marker: 'not benchmark' os: ubuntu-latest - python-version: "3.11" marker: 'not benchmark' os: ubuntu-latest - python-version: "3.12" marker: 'not benchmark' os: ubuntu-latest - python-version: "3.12" marker: 'not benchmark and (redis or redis_cluster or redis_sentinel) and asyncio' os: ubuntu-latest async_redis_implementation: "redispy" - python-version: "3.13" marker: 'not benchmark and (redis or redis_cluster or redis_sentinel) and asyncio' os: ubuntu-latest async_redis_implementation: "redispy" - python-version: "3.13" marker: 'not benchmark and not (asyncio and memcached)' os: ubuntu-latest runs-on: "${{ matrix.os }}" steps: - uses: actions/checkout@v4 - name: Cache dependencies uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }} restore-keys: | ${{ runner.os }}-pip- - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install -U pip python -m pip install -U setuptools wheel pip install --no-binary protobuf -r requirements/ci.txt - name: Tests env: CI: "True" ASYNC_REDIS_IMPLEMENTATION: "${{ matrix.async_redis_implementation }}" run: | py.test -m "${{ matrix.marker }}" --cov-report=xml --cov-branch --max-runs=3 - name: Upload coverage to Codecov uses: codecov/codecov-action@v5 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} benchmark: needs: [test] runs-on: ubuntu-latest strategy: matrix: python-version: ["3.11"] steps: - uses: actions/checkout@v4 - name: Cache dependencies uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }} restore-keys: | ${{ runner.os }}-pip- - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install "pip<22" python -m pip install --upgrade setuptools wheel pip install --no-binary protobuf -r requirements/ci.txt - name: Benchmark env: CI: "True" run: | py.test -W ignore -m "benchmark" --benchmark-min-rounds=1000 --benchmark-histogram=benchmarks/benchmark - name: Upload benchmark results uses: actions/upload-artifact@v4 with: name: benchmark path: benchmarks/* build_wheels: needs: [lint] name: Build 
wheel runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Build wheels run: | python -m pip install build python -m build --wheel - uses: actions/upload-artifact@v4 with: name: wheels path: ./dist/*.whl build_sdist: needs: [lint] name: Build source distribution runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Build sdist run: | pipx run build --sdist - uses: actions/upload-artifact@v4 with: name: src_dist path: dist/*.tar.gz upload_pypi: needs: [test, build_wheels, build_sdist] runs-on: ubuntu-latest if: github.ref == 'refs/heads/master' permissions: id-token: write steps: - uses: actions/download-artifact@v4 with: name: wheels path: dist - uses: actions/download-artifact@v4 with: name: src_dist path: dist - uses: pypa/gh-action-pypi-publish@release/v1 with: repository_url: https://test.pypi.org/legacy/ skip_existing: true upload_pypi_release: needs: [test, benchmark, build_wheels, build_sdist] runs-on: ubuntu-latest if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') permissions: id-token: write steps: - uses: actions/download-artifact@v4 with: name: wheels path: dist - uses: actions/download-artifact@v4 with: name: src_dist path: dist - uses: pypa/gh-action-pypi-publish@release/v1 github_release: needs: [upload_pypi_release] name: Create Release runs-on: ubuntu-latest if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-depth: 0 - name: Download wheel artifacts uses: actions/download-artifact@v4 with: name: wheels path: dist - name: Download source artifacts uses: actions/download-artifact@v4 with: name: src_dist path: dist - name: Download benchmark results uses: actions/download-artifact@v4 with: name: benchmark path: benchmarks - name: Generate release notes run: | ./scripts/github_release_notes.sh > release_notes.md - name: Create Release uses: ncipollo/release-action@v1 with: artifacts: "dist/*,benchmarks/*" bodyFile: release_notes.md token: ${{ secrets.GITHUB_TOKEN }} limits-4.4.1/.gitignore000066400000000000000000000002031476517132700150210ustar00rootroot00000000000000*.pyc *.log cover/* .mypy_cache/* .coverage* .test_env .tool-versions .idea build/ dist/ htmlcov *egg-info* .python-version .*.swp limits-4.4.1/.readthedocs.yml000066400000000000000000000005211476517132700161220ustar00rootroot00000000000000version: 2 build: os: ubuntu-20.04 tools: python: "3.13" # You can also specify other tool versions: # nodejs: "16" # rust: "1.55" # golang: "1.17" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: doc/source/conf.py python: install: - requirements: requirements/docs.txt limits-4.4.1/CLASSIFIERS000066400000000000000000000007451476517132700145360ustar00rootroot00000000000000Development Status :: 5 - Production/Stable Intended Audience :: Developers License :: OSI Approved :: MIT License Operating System :: MacOS Operating System :: POSIX :: Linux Operating System :: OS Independent Topic :: Software Development :: Libraries :: Python Modules Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: 3.13 Programming Language :: Python :: Implementation :: PyPy limits-4.4.1/CODE_OF_CONDUCT.md000066400000000000000000000121221476517132700156330ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make 
participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. 
This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. limits-4.4.1/CONTRIBUTIONS.rst000066400000000000000000000004371476517132700156360ustar00rootroot00000000000000Contributors ============ - `Timothee Groleau `_ - `Zehua Liu `_ - `David Czarnecki `_ - `Laurent Savaete `_ - `Antoine Merino `_ limits-4.4.1/HISTORY.rst000066400000000000000000000302731476517132700147360ustar00rootroot00000000000000.. :changelog: Changelog ========= v4.4.1 ------ Release Date: 2025-03-14 * Documentation * Fix deprecation documentation for etcd v4.4 ---- Release Date: 2025-03-14 * Compatibility * Deprecate support for ``etcd`` v4.3 ---- Release Date: 2025-03-14 * Feature * Add support for ``valkey://`` schemes and using the ``valkey-py`` dependency * Compatibility * Drop support for python 3.9 * Improve typing to use python 3.10+ features v4.2 ---- Release Date: 2025-03-11 * Feature * Add support for using ``redis-py`` instead of ``coredis`` with asyncio + redis storages v4.1 ---- Release Date: 2025-03-07 * Feature * Add new Sliding Window Counter strategy * Deprecation * Deprecate the Fixed window with elastic expiry strategy * Documentation * Re-write strategy documentation with concrete examples v4.0.1 ------ Release Date: 2025-01-16 * Security * Change pypi release to use trusted publishing v4.0.0 ------ Release Date: 2025-01-05 * Breaking change * Change definition of ``reset_time`` in ``get_window_stats`` to use a precise floating point value instead of truncating to the previous second. v3.14.1 ------- Release Date: 2024-11-30 * Chore * Fix benchmark artifact upload/download issue during release creation v3.14.0 ------- Release Date: 2024-11-29 * Feature * Allow custom collection names in mongodb storage * Compatibility * Add support for python 3.13 * Drop support for python 3.8 * Deprecations * Remove fallback support to use redis-py-cluster v3.13.0 ------- Release Date: 2024-06-22 * Feature * Add ``cost`` parameter to ``test`` methods in strategies.
v3.12.0 ------- Release Date: 2024-05-12 * Enhancements * Lazily initialize pymongo client * Documentation * Add django-ratelimiter in docs * Chores * Update development dependencies * Update github actions to latest v3.11.0 ------- Release Date: 2024-04-20 * Compatibility * Add support for python 3.12 v3.10.1 ------- Release Date: 2024-03-17 * Compatibility * Relax dependency constraint on packaging v3.10.0 ------- Release Date: 2024-03-08 * Bug Fix * Fix incorrect mapping of coredis exceptions * Fix calculation of reset_time v3.9.0 ------ Release Date: 2024-02-17 * Bug Fix * Remove excessively low defaults for mongodb storage and instead delegate to the underlying dependency (pymongo, motor) v3.8.0 ------ Release Date: 2024-02-14 * Features * Add option to wrap storage errors with a ``StorageError`` exception v3.7.0 ------ Release Date: 2023-11-24 * Features * Ensure rate limit keys in redis use are prefixed with a `LIMITS` prefix. This allows for resetting all keys generated by the library without implicit knowledge of the key structure. v3.6.0 ------ Release Date: 2023-08-31 * Bug Fix * Remove default socket timeout from mongo storage * Ensure _version.py has stable content when generated using `git archive` from a tag regardless of when it is run. * Compatibility * Remove references to python 3.7 * Remove unnecessary setuptools dependency v3.5.0 ------ Release Date: 2023-05-16 * Bug Fix * Handle ``cost`` > 8000 when using redis * Remove arbitrary default timeout for redis+sentinel v3.4.0 ------ Release Date: 2023-04-17 * Bug Fix * Remove use of weakreferences to storages in strategy classes as this was not documented or required and led to usability issues. * Chores * Update documentation dependencies * Remove unused gcra lua script v3.3.1 ------ Release Date: 2023-03-22 * Compatibility * Block incompatible versions of redis-py * Chores * Force error on warnings in tests v3.3.0 ------ Release Date: 2023-03-20 * Compatibility * Remove deprecated use of `pkg_resources` and switch to `importlib_resource` * Chores * Update documentation dependencies * Update github actions versions v3.2.0 ------ Release Date: 2023-01-24 * Bug Fix * Fix handling of authentication details in storage url of redis cluster * Chores * Add test coverage for redis cluster with auth required v3.1.6 ------ Release Date: 2023-01-16 * Bug Fix * Disallow acquiring amounts > limit in moving window * Usability * Use a named tuple for the response from `RateLimiter.get_window_stats` v3.1.5 ------ Release Date: 2023-01-12 * Performance * Reduce rpc calls to etcd for counter increment * Compatibility * Relax version requirements for packaging dependency * Chores * Improve benchmark outputs * Improve documentation for etcd v3.1.4 ------ Release Date: 2023-01-06 * Chores * Fix benchmark result artifact capture v3.1.3 ------ Release Date: 2023-01-06 * Chores * Fix benchmark result artifact capture v3.1.2 ------ Release Date: 2023-01-06 * Chores * Collapse benchmark & ci workflows v3.1.1 ------ Release Date: 2023-01-06 * Chores * Fix compatibility tests for etcd in CI * Improve visual identifiers of tests * Add benchmark tests in CI v3.1.0 ------ Release Date: 2023-01-05 * Compatibility * Increase minimum version of pymongo to 4.1 * Chores * Refactor storage tests * Improve test coverage across python versions in CI v3.0.0 ------ Release Date: 2023-01-04 * Features * Added etcd storage support for fixed window strategies * Compatibility * Removed deprecated GAE Memcached storage * Updated minimum dependencies for mongodb * 
Updated dependency for async memcached on python 3.11 v2.8.0 ------ Release Date: 2022-12-23 * Chores * Make rate limit items hashable * Update test certificates v2.7.2 ------ Release Date: 2022-12-11 * Compatibility Updates * Update documentation dependencies * Relax version constraint for packaging dependency * Bump CI to use python 3.11 final v2.7.1 ------ Release Date: 2022-10-20 * Compatibility Updates * Increase pymemcached dependency range to include 4.x * Add python 3.11 rc2 to CI v2.7.0 ------ Release Date: 2022-07-16 * Compatibility Updates * Update :pypi:`coredis` requirements to include 4.x versions * Remove CI / support for redis < 6.0 * Remove python 3.7 from CI * Add redis 7.0 in CI v2.6.3 ------ Release Date: 2022-06-05 * Chores * Update development dependencies * Add CI for python 3.11 * Increase test coverage for redis sentinel v2.6.2 ------ Release Date: 2022-05-12 * Compatibility Updates * Update :pypi:`motor` requirements to include 3.x version * Update async redis sentinel implementation to remove use of deprecated methods. * Fix compatibility issue with asyncio redis ``reset`` method in cluster mode when used with :pypi:`coredis` versions >= 3.5.0 v2.6.1 ------ Release Date: 2022-04-25 * Bug Fix * Fix typing regression with strategy constructors `Issue 88 `_ v2.6.0 ------ Release Date: 2022-04-25 * Deprecation * Removed tests for rediscluster using the :pypi:`redis-py-cluster` library * Bug Fix * Fix incorrect ``__slots__`` declaration in :class:`limits.RateLimitItem` and its subclasses (`Issue #121 `__) v2.5.4 ------ Release Date: 2022-04-25 * Bug Fix * Fix typing regression with strategy constructors `Issue 88 `_ v2.5.3 ------ Release Date: 2022-04-22 * Chore * Automate Github releases v2.5.2 ------ Release Date: 2022-04-17 * Chore * Increase strictness of type checking and annotations * Ensure installations from source distributions are PEP-561 compliant v2.5.1 ------ Release Date: 2022-04-15 * Chore * Ensure storage reset methods have consistent signature v2.5.0 ------ Release Date: 2022-04-13 * Feature * Add support for using redis cluster via the official redis client * Update coredis dependency to use 3.x * Deprecations * Deprecate using redis-py-cluster * Chores * Remove beta tags for async support * Update code base to remove legacy syntax * Tighten up CI test dependencies v2.4.0 ------ Release Date: 2022-03-10 * Feature * Allow passing an explicit connection pool to redis storage. Addresses `Issue 77 `_ v2.3.3 ------ Release Date: 2022-02-03 * Feature * Add support for dns seed list when using mongodb v2.3.2 ------ Release Date: 2022-01-30 * Chores * Improve authentication tests for redis * Update documentation theme * Pin pip version for CI v2.3.1 ------ Release Date: 2022-01-21 * Bug fix * Fix backward incompatible change that separated sentinel and connection args for redis sentinel (introduced in 2.1.0).
Addresses `Issue 97 `_ v2.3.0 ------ Release Date: 2022-01-15 * Feature * Add support for custom cost per hit * Bug fix * Fix installation issues with missing setuptools v2.2.0 ------ Release Date: 2022-01-05 * Feature * Enable async redis for python 3.10 via coredis * Chore * Fix typing issue with strategy constructors v2.1.1 ------ Release Date: 2022-01-02 * Feature * Enable async memcache for python 3.10 * Bug fix * Ensure window expiry is reported in local time for mongodb * Fix inconsistent expiry for fixed window with memcached * Chore * Improve strategy tests v2.1.0 ------ Release Date: 2021-12-22 * Feature * Add beta asyncio support * Add beta mongodb support * Add option to install with extras for different storages * Bug fix * Fix custom option for cluster client in memcached * Fix separation of sentinel & connection args in :class:`limits.storage.RedisSentinelStorage` * Deprecation * Deprecate GAEMemcached support * Remove use of unused `no_add` argument in :meth:`limits.storage.MovingWindowSupport.acquire_entry` * Chore * Documentation theme upgrades * Code linting * Add compatibility CI workflow v2.0.3 ------ Release Date: 2021-11-28 * Chore * Ensure package is marked PEP-561 compliant v2.0.1 ------ Release Date: 2021-11-28 * Chore * Added type annotations v2.0.0 ------ Release Date: 2021-11-27 * Chore * Drop support for python < 3.7 v1.6 ---- Release Date: 2021-11-27 * Chore * Final release for python < 3.7 v1.5.1 ------ Release Date: 2020-02-25 * Bug fix * Remove duplicate call to ttl in RedisStorage * Initialize master/slave connections for RedisSentinel once v1.5 ---- Release Date: 2020-01-23 * Bug fix for handling TTL response from Redis when key doesn’t exist * Support Memcache over unix domain socket * Support Memcache cluster * Pass through constructor keyword arguments to underlying storage constructor(s) * CI & test improvements v1.4.1 ------ Release Date: 2019-12-15 * Bug fix for implementation of clear in MemoryStorage not working with MovingWindow v1.4 ---- Release Date: 2019-12-14 * Expose API for clearing individual limits * Support for redis over unix domain socket * Support extra arguments to redis storage v1.3 ------ Release Date: 2018-01-28 * Remove pinging redis on initialization v1.2.1 ------ Release Date: 2017-01-02 * Fix regression with csv as multiple limits v1.2.0 ------ Release Date: 2016-09-21 * Support reset for RedisStorage * Improved rate limit string parsing v1.1.1 ------ Release Date: 2016-03-14 * Support reset for MemoryStorage * Support for `rediss://` storage scheme to connect to redis over ssl v1.1 ---- Release Date: 2015-12-20 * Redis Cluster support * Authentication for Redis Sentinel * Bug fix for locking failures with redis. v1.0.9 ------ Release Date: 2015-10-08 * Redis Sentinel storage support * Drop support for python 2.6 * Documentation improvements v1.0.7 ------ Release Date: 2015-06-07 * No functional change v1.0.6 ------ Release Date: 2015-05-13 * Bug fixes for .test() logic v1.0.5 ------ Release Date: 2015-05-12 * Add support for testing a rate limit before hitting it. v1.0.3 ------ Release Date: 2015-03-20 * Add support for passing options to storage backend v1.0.2 ------ Release Date: 2015-01-10 * Improved documentation * Improved usability of API. Renamed RateLimitItem subclasses. v1.0.1 ------ Release Date: 2015-01-08 * Example usage in docs.
v1.0.0 ------ Release Date: 2015-01-08 * Initial import of common rate limiting code from `Flask-Limiter `_ limits-4.4.1/LICENSE.txt000066400000000000000000000020451476517132700146620ustar00rootroot00000000000000Copyright (c) 2023 Ali-Akber Saifee Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. limits-4.4.1/MANIFEST.in000066400000000000000000000006101476517132700145710ustar00rootroot00000000000000include README.rst include LICENSE.txt include HISTORY.rst include CONTRIBUTIONS.rst include CLASSIFIERS include versioneer.py recursive-include requirements *.txt recursive-include requirements/storage *.txt recursive-include doc/source * recursive-include doc *.py Make* include limits/_version.py include limits/py.typed recursive-include limits *.lua recursive-include limits/resources * limits-4.4.1/Makefile000066400000000000000000000003551476517132700145010ustar00rootroot00000000000000lint: ruff check --select I limits tests ruff check limits tests ruff format --check limits tests mypy limits lint-fix: ruff check --select I --fix limits tests ruff check --fix limits tests ruff format limits tests mypy limits limits-4.4.1/README.rst000066400000000000000000000203461476517132700145320ustar00rootroot00000000000000.. |ci| image:: https://github.com/alisaifee/limits/actions/workflows/main.yml/badge.svg?branch=master :target: https://github.com/alisaifee/limits/actions?query=branch%3Amaster+workflow%3ACI .. |codecov| image:: https://codecov.io/gh/alisaifee/limits/branch/master/graph/badge.svg :target: https://codecov.io/gh/alisaifee/limits .. |pypi| image:: https://img.shields.io/pypi/v/limits.svg?style=flat-square :target: https://pypi.python.org/pypi/limits .. |pypi-versions| image:: https://img.shields.io/pypi/pyversions/limits?style=flat-square :target: https://pypi.python.org/pypi/limits .. |license| image:: https://img.shields.io/pypi/l/limits.svg?style=flat-square :target: https://pypi.python.org/pypi/limits .. |docs| image:: https://readthedocs.org/projects/limits/badge/?version=latest :target: https://limits.readthedocs.org limits ------ |docs| |ci| |codecov| |pypi| |pypi-versions| |license| **limits** is a python library for rate limiting via multiple strategies with commonly used storage backends (Redis, Memcached, MongoDB & Etcd). The library provides identical APIs for use in sync and `async `_ codebases. Supported Strategies ==================== All strategies support the following methods (illustrated below): - `hit `_: consume a request. - `test `_: check if a request is allowed. - `get_window_stats `_: retrieve remaining quota and reset time.
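As a quick preview of these three methods, here is a condensed version of the fuller examples in the `Dive right in`_ section, using the in-memory storage and the fixed window strategy described next:

.. code-block:: python

    from limits import parse, storage, strategies

    limiter = strategies.FixedWindowRateLimiter(storage.MemoryStorage())
    two_per_minute = parse("2/minute")

    assert limiter.hit(two_per_minute, "test_namespace", "foo")   # consume one request
    assert limiter.test(two_per_minute, "test_namespace", "foo")  # quota still available
    assert limiter.hit(two_per_minute, "test_namespace", "foo")
    stats = limiter.get_window_stats(two_per_minute, "test_namespace", "foo")
    assert stats.remaining == 0  # exhausted until stats.reset_time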
Fixed Window ------------ `Fixed Window `_ This strategy is the most memory-efficient because it uses a single counter per resource and rate limit. When the first request arrives, a window is started for a fixed duration (e.g., for a rate limit of 10 requests per minute, the window expires 60 seconds after the first request). All requests in that window increment the counter, and when the window expires, the counter resets. Burst traffic that bypasses the rate limit may occur at window boundaries. For example, with a rate limit of 10 requests per minute: - At **00:00:45**, the first request arrives, starting a window from **00:00:45** to **00:01:45**. - All requests between **00:00:45** and **00:01:45** count toward the limit. - If 10 requests occur at any time in that window, any further request before **00:01:45** is rejected. - At **00:01:45**, the counter resets and a new window starts, allowing 10 more requests until **00:02:45**. Moving Window ------------- `Moving Window `_ This strategy keeps a log of request timestamps and allows a new request (adding its timestamp to the log) only if the ``nth`` oldest entry (where ``n`` is the limit) is either not present or older than the duration of the window (for example, with a rate limit of ``10 requests per minute``, if there are fewer than 10 entries or the 10th oldest entry is at least 60 seconds old). When a new entry is added to the log, "expired" entries are truncated. For example, with a rate limit of 10 requests per minute: - At **00:00:10**, a client sends 1 request, which is allowed. - At **00:00:20**, the client sends 2 requests, which are allowed. - At **00:00:30**, the client sends 4 requests, which are allowed. - At **00:00:50**, the client sends 3 requests, which are allowed (total = 10). - At **00:01:11**, the client sends 1 request. The strategy checks the timestamp of the 10th oldest entry (**00:00:10**), which is now 61 seconds old and thus expired. The request is allowed. - At **00:01:12**, the client sends 1 request. The 10th oldest entry's timestamp is **00:00:20**, which is only 52 seconds old. The request is rejected. Sliding Window Counter ---------------------- `Sliding Window Counter `_ This strategy approximates the moving window while using less memory by maintaining two counters: - **Current bucket:** counts requests in the ongoing period. - **Previous bucket:** counts requests in the immediately preceding period. When a request arrives, the effective request count is calculated as:: weighted_count = current_count + floor(previous_count * weight) The weight is based on how much time has elapsed in the current bucket:: weight = (bucket_duration - elapsed_time) / bucket_duration If ``weighted_count`` is below the limit, the request is allowed. For example, with a rate limit of 10 requests per minute: Assume: - The current bucket (spanning **00:01:00** to **00:02:00**) has 8 hits. - The previous bucket (spanning **00:00:00** to **00:01:00**) has 4 hits. Scenario 1: - A new request arrives at **00:01:30**, 30 seconds into the current bucket. - ``weight = (60 - 30) / 60 = 0.5``. - ``weighted_count = floor(8 + (4 * 0.5)) = floor(8 + 2) = 10``. - Since the weighted count equals the limit, the request is rejected. Scenario 2: - A new request arrives at **00:01:40**, 40 seconds into the current bucket. - ``weight = (60 - 40) / 60 ≈ 0.33``. - ``weighted_count = floor(8 + (4 * 0.33)) = floor(8 + 1.32) = 9``. - Since the weighted count is below the limit, the request is allowed.
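The bucket arithmetic above can be sketched in a few lines of Python. This is a minimal illustration of the formula only (the function name and signature are invented for the example), not the library's implementation, where the two counters live in the configured storage backend:

.. code-block:: python

    import math

    def sliding_window_allows(
        current_count: int,
        previous_count: int,
        elapsed: float,
        bucket_duration: float = 60,
        limit: int = 10,
    ) -> bool:
        # Weight the previous bucket by the fraction of it that still
        # overlaps the sliding window.
        weight = (bucket_duration - elapsed) / bucket_duration
        weighted_count = current_count + math.floor(previous_count * weight)
        return weighted_count < limit

    # Scenario 1: 30 seconds into the current bucket -> rejected
    assert not sliding_window_allows(current_count=8, previous_count=4, elapsed=30)
    # Scenario 2: 40 seconds into the current bucket -> allowed
    assert sliding_window_allows(current_count=8, previous_count=4, elapsed=40)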
Storage backends ================ - `Redis `_ - `Memcached `_ - `MongoDB `_ - `Etcd `_ - `In-Memory `_ Dive right in ============= Initialize the storage backend .. code-block:: python from limits import storage backend = storage.MemoryStorage() # or memcached backend = storage.MemcachedStorage("memcached://localhost:11211") # or redis backend = storage.RedisStorage("redis://localhost:6379") # or mongodb backend = storage.MongoDbStorage("mongodb://localhost:27017") # or use the factory storage_uri = "memcached://localhost:11211" backend = storage.storage_from_string(storage_uri) Initialize a rate limiter with a strategy .. code-block:: python from limits import strategies strategy = strategies.MovingWindowRateLimiter(backend) # or fixed window strategy = strategies.FixedWindowRateLimiter(backend) # or sliding window strategy = strategies.SlidingWindowCounterRateLimiter(backend) Initialize a rate limit .. code-block:: python from limits import parse one_per_minute = parse("1/minute") Initialize a rate limit explicitly .. code-block:: python from limits import RateLimitItemPerSecond one_per_second = RateLimitItemPerSecond(1, 1) Test the limits .. code-block:: python import time assert True == strategy.hit(one_per_minute, "test_namespace", "foo") assert False == strategy.hit(one_per_minute, "test_namespace", "foo") assert True == strategy.hit(one_per_minute, "test_namespace", "bar") assert True == strategy.hit(one_per_second, "test_namespace", "foo") assert False == strategy.hit(one_per_second, "test_namespace", "foo") time.sleep(1) assert True == strategy.hit(one_per_second, "test_namespace", "foo") Check specific limits without hitting them .. code-block:: python assert True == strategy.hit(one_per_second, "test_namespace", "foo") while not strategy.test(one_per_second, "test_namespace", "foo"): time.sleep(0.01) assert True == strategy.hit(one_per_second, "test_namespace", "foo") Query available capacity and reset time for a limit .. code-block:: python assert True == strategy.hit(one_per_minute, "test_namespace", "foo") window = strategy.get_window_stats(one_per_minute, "test_namespace", "foo") assert window.remaining == 0 assert False == strategy.hit(one_per_minute, "test_namespace", "foo") time.sleep(window.reset_time - time.time()) assert True == strategy.hit(one_per_minute, "test_namespace", "foo") Links ===== * `Documentation `_ * `Changelog `_ limits-4.4.1/doc/000077500000000000000000000000001476517132700136035ustar00rootroot00000000000000limits-4.4.1/doc/Makefile000066400000000000000000000152271476517132700152520ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Flask-Ratelimit.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Flask-Ratelimit.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Flask-Ratelimit" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Flask-Ratelimit" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." limits-4.4.1/doc/source/000077500000000000000000000000001476517132700151035ustar00rootroot00000000000000limits-4.4.1/doc/source/_static/000077500000000000000000000000001476517132700165315ustar00rootroot00000000000000limits-4.4.1/doc/source/_static/custom.css000066400000000000000000000002051476517132700205520ustar00rootroot00000000000000.badges { display: flex; padding: 5px; flex-direction: row; justify-content: center; } .header-badge { padding: 2px; } limits-4.4.1/doc/source/api.rst000066400000000000000000000076611476517132700164140ustar00rootroot00000000000000:tocdepth: 4 ============= API Reference ============= .. autosummary:: limits limits.strategies limits.storage limits.aio.strategies limits.aio.storage .. currentmodule:: limits Strategies ========== Synchronous Strategies ---------------------- The available built-in rate limiting strategies, each of which expects a single parameter: a subclass of :class:`~limits.storage.Storage`. .. currentmodule:: limits.strategies Provided by :mod:`limits.strategies` .. autoclass:: FixedWindowRateLimiter .. autoclass:: FixedWindowElasticExpiryRateLimiter
.. autoclass:: MovingWindowRateLimiter .. autoclass:: SlidingWindowCounterRateLimiter All strategies implement the same abstract base class: .. autoclass:: RateLimiter Async Strategies ---------------- These variants should be used for asyncio support. These strategies expose async variants and expect a subclass of :class:`limits.aio.storage.Storage` .. currentmodule:: limits.aio.strategies Provided by :mod:`limits.aio.strategies` .. autoclass:: FixedWindowRateLimiter .. autoclass:: FixedWindowElasticExpiryRateLimiter .. autoclass:: MovingWindowRateLimiter .. autoclass:: SlidingWindowCounterRateLimiter All strategies implement the same abstract base class: .. autoclass:: RateLimiter Storage ======= Storage Factory function ------------------------ Provided by :mod:`limits.storage` .. autofunction:: limits.storage.storage_from_string Synchronous Storage ------------------- Provided by :mod:`limits.storage` .. currentmodule:: limits.storage In-Memory Storage ^^^^^^^^^^^^^^^^^ .. autoclass:: MemoryStorage Redis Storage ^^^^^^^^^^^^^ .. autoclass:: RedisStorage Redis Cluster Storage ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: RedisClusterStorage Redis Sentinel Storage ^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: RedisSentinelStorage Memcached Storage ^^^^^^^^^^^^^^^^^ .. autoclass:: MemcachedStorage MongoDB Storage ^^^^^^^^^^^^^^^ .. autoclass:: MongoDBStorage Etcd Storage ^^^^^^^^^^^^ .. autoclass:: EtcdStorage Async Storage ------------- Provided by :mod:`limits.aio.storage` .. currentmodule:: limits.aio.storage Async In-Memory Storage ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: MemoryStorage Async Redis Storage ^^^^^^^^^^^^^^^^^^^ .. autoclass:: RedisStorage Async Redis Cluster Storage ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: RedisClusterStorage Async Redis Sentinel Storage ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: RedisSentinelStorage Async Memcached Storage ^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: MemcachedStorage Async MongoDB Storage ^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: MongoDBStorage Async Etcd Storage ^^^^^^^^^^^^^^^^^^ .. autoclass:: EtcdStorage Abstract storage classes ------------------------ .. autoclass:: limits.storage.Storage .. autoclass:: limits.storage.MovingWindowSupport .. autoclass:: limits.storage.SlidingWindowCounterSupport Async Abstract storage classes -------------------------------- .. autoclass:: limits.aio.storage.Storage .. autoclass:: limits.aio.storage.MovingWindowSupport .. autoclass:: limits.aio.storage.SlidingWindowCounterSupport Rate Limits =========== .. currentmodule:: limits Provided by :mod:`limits` Parsing functions ----------------- .. autofunction:: parse .. autofunction:: parse_many Rate limit granularities ------------------------ All rate limit items implement :class:`RateLimitItem` by declaring a :attr:`GRANULARITY` .. autoclass:: RateLimitItem ------ .. autoclass:: RateLimitItemPerSecond .. autoclass:: RateLimitItemPerMinute .. autoclass:: RateLimitItemPerHour .. autoclass:: RateLimitItemPerDay .. autoclass:: RateLimitItemPerMonth .. autoclass:: RateLimitItemPerYear Structures ========== .. autoclass:: limits.WindowStats :no-inherited-members: Exceptions ========== .. autoexception:: limits.errors.ConfigurationError :no-inherited-members: .. autoexception:: limits.errors.ConcurrentUpdateError :no-inherited-members: .. autoexception:: limits.errors.StorageError :no-inherited-members: limits-4.4.1/doc/source/async.rst000066400000000000000000000024751476517132700167560ustar00rootroot00000000000000============= Async Support ============= ..
versionadded:: 2.1 A new namespace ``limits.aio`` is available which mirrors the original ``limits.storage`` and ``limits.strategies`` packages. The following async storage backends are implemented: - In-Memory - Redis (via `coredis `_ or `redis-py `_. Refer to :paramref:`limits.aio.storage.RedisStorage.implementation` for details on selecting the dependency) - Memcached (via `emcache `_) - MongoDB (via `motor `_) - Etcd (via `aetcd `_) Quick start =========== This example demonstrates the subtle differences in the ``limits.aio`` namespace: .. code:: from limits import parse from limits.storage import storage_from_string from limits.aio.strategies import MovingWindowRateLimiter redis = storage_from_string("async+redis://localhost:6379") moving_window = MovingWindowRateLimiter(redis) one_per_minute = parse("1/minute") async def hit(): return await moving_window.hit(one_per_minute, "test_namespace", "foo") Refer to :ref:`api:async storage` for more implementation details of the async storage backends, and :ref:`api:async strategies` for the async rate limit strategies API. limits-4.4.1/doc/source/changelog.rst000066400000000000000000000000371476517132700175640ustar00rootroot00000000000000.. include:: ../../HISTORY.rst limits-4.4.1/doc/source/conf.py000066400000000000000000000053621476517132700164100ustar00rootroot00000000000000# import os import sys sys.path.insert(0, os.path.abspath("../../")) sys.path.insert(0, os.path.abspath("./")) from theme_config import * import limits project = "limits" description = "limits is a python library to perform rate limiting with commonly used storage backends" copyright = "2023, Ali-Akber Saifee" if ".post0.dev" in limits.__version__: version, ahead = limits.__version__.split(".post0.dev") else: version = limits.__version__ release = version html_static_path = ["./_static"] html_css_files = [ "custom.css", "https://fonts.googleapis.com/css2?family=Fira+Code:wght@300;400;700&family=Fira+Sans:ital,wght@0,100;0,200;0,300;0,400;0,500;0,600;0,700;0,800;0,900;1,100;1,200;1,300;1,400;1,500;1,600;1,700;1,800;1,900&display=swap", ] html_title = f"{project} {{{release}}}" try: ahead = int(ahead) if ahead > 0: html_theme_options[ "announcement" ] = f""" This is a development version. 
The documentation for the latest version: {release} can be found here """ html_title = f"{project} {{dev}}" except: pass extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosectionlabel", "sphinx.ext.autosummary", "sphinx.ext.extlinks", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinx.ext.mathjax", "sphinxext.opengraph", "sphinxcontrib.programoutput", "sphinx_copybutton", "sphinx_inline_tabs", "sphinx_paramlinks", ] autodoc_default_options = { "members": True, "inherited-members": True, "inherit-docstrings": True, "member-order": "bysource", } add_module_names = False autoclass_content = "both" autodoc_typehints_format = "short" autosectionlabel_maxdepth = 3 autosectionlabel_prefix_document = True extlinks = {"pypi": ("https://pypi.org/project/%s", "%s")} intersphinx_mapping = { "python": ("http://docs.python.org/", None), "coredis": ("https://coredis.readthedocs.io/en/latest/", None), "emcache": ("https://emcache.readthedocs.io/en/latest/", None), "motor": ("https://motor.readthedocs.io/en/stable/", None), "redis-py-cluster": ("https://redis-py-cluster.readthedocs.io/en/latest/", None), "redis-py": ("https://redis-py.readthedocs.io/en/latest/", None), "pymemcache": ("https://pymemcache.readthedocs.io/en/latest/", None), "pymongo": ("https://pymongo.readthedocs.io/en/stable/", None), "python-etcd3": ("https://python-etcd3.readthedocs.io/en/latest/", None), "aetcd": ("https://aetcd.readthedocs.io/en/latest/", None), "valkey-py": ("https://valkey-py.readthedocs.io/en/latest/", None), } limits-4.4.1/doc/source/custom-storage.rst000066400000000000000000000107661476517132700206230ustar00rootroot00000000000000.. currentmodule:: limits ======================= Custom storage backends ======================= The **limits** package ships with a few storage implementations which allow you to get started with some common data stores (Redis & Memcached) used for rate limiting. To accommodate customizations to either the default storage backends or different storage backends altogether, **limits** uses a registry pattern that makes it painless to add your own custom storage (without having to submit patches to the package itself). Creating a custom backend requires: #. Subclassing :class:`limits.storage.Storage` or :class:`limits.aio.storage.Storage` and implementing the abstract methods. This will allow the storage to be used with the :ref:`strategies:fixed window` strategies. #. If the storage can support the :ref:`strategies:moving window` strategy – additionally implementing the methods from :class:`~limits.storage.MovingWindowSupport` #. If the storage can support the :ref:`strategies:sliding window counter` strategy – additionally implementing the methods from :class:`~limits.storage.SlidingWindowCounterSupport` #. Providing naming *schemes* that can be used to look up the custom storage in the storage registry. (Refer to :ref:`storage:storage scheme` for more details) Example ======= The following example shows two backend stores: one which only supports the :ref:`strategies:fixed window` strategy and one that implements all strategies. 
Note the :code:`STORAGE_SCHEME` class variables which result in the classes getting registered with the **limits** storage registry:: import time from urllib.parse import urlparse from typing import Tuple, Type, Union from limits.storage import Storage, MovingWindowSupport, SlidingWindowCounterSupport class BasicStorage(Storage): """A simple fixed-window storage backend.""" STORAGE_SCHEME = ["basicdb"] def __init__(self, uri: str, **options) -> None: self.host = urlparse(uri).hostname or "" self.port = urlparse(uri).port or 0 @property def base_exceptions(self) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: return () def check(self) -> bool: return True def get_expiry(self, key: str) -> int: return int(time.time()) def incr(self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1) -> int: return amount def get(self, key: str) -> int: return 0 def reset(self) -> int: return 0 def clear(self, key: str) -> None: pass class AdvancedStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport): """A more advanced storage backend supporting all rate-limiting strategies.""" STORAGE_SCHEME = ["advanceddatabase"] def __init__(self, uri: str, **options) -> None: self.host = urlparse(uri).hostname or "" self.port = urlparse(uri).port or 0 @property def base_exceptions(self) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: return () def check(self) -> bool: return True def get_expiry(self, key: str) -> int: return int(time.time()) def incr(self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1) -> int: return amount def get(self, key: str) -> int: return 0 def reset(self) -> int: return 0 def clear(self, key: str) -> None: pass # --- Moving Window Support --- def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: return True def get_moving_window(self, key: str, limit: int, expiry: int) -> Tuple[float, int]: return (time.time(), 0) # --- Sliding Window Counter Support --- def acquire_sliding_window_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: return True def get_sliding_window(self, key: str, expiry: int) -> Tuple[int, float, int, float]: return (0, expiry / 2, 0, expiry) Once the above implementations are declared, you can look them up using the :ref:`api:storage factory function` in the following manner:: from limits.storage import storage_from_string basic_store = storage_from_string("basicdb://localhost:42") advanced_store = storage_from_string("advanceddatabase://localhost:42") limits-4.4.1/doc/source/index.rst000066400000000000000000000064431476517132700167530ustar00rootroot00000000000000======== *limits* ======== .. container:: badges .. image:: https://img.shields.io/github/last-commit/alisaifee/limits?logo=github&style=for-the-badge&labelColor=#282828 :target: https://github.com/alisaifee/limits :class: header-badge .. image:: https://img.shields.io/github/actions/workflow/status/alisaifee/limits/main.yml?logo=github&style=for-the-badge&labelColor=#282828 :target: https://github.com/alisaifee/limits/actions/workflows/main.yml .. image:: https://img.shields.io/codecov/c/github/alisaifee/limits?logo=codecov&style=for-the-badge&labelColor=#282828 :target: https://app.codecov.io/gh/alisaifee/limits :class: header-badge .. 
image:: https://img.shields.io/pypi/pyversions/limits?style=for-the-badge&logo=pypi :target: https://pypi.org/project/limits :class: header-badge ---- **limits** is a python library for rate limiting via multiple strategies with commonly used storage backends (Redis, Memcached, MongoDB & Etcd). The library provides identical APIs for use in sync and :ref:`async ` codebases. Get started by taking a look at :ref:`installation:installation` and :ref:`quickstart:quickstart`. To learn more about the different strategies refer to the :ref:`strategies:rate limiting strategies` section. For an overview of supported backends refer to :ref:`storage:storage backends`. .. toctree:: :maxdepth: 3 :hidden: installation quickstart strategies storage async api custom-storage changelog ---- Development =========== The source is available on `Github `_ To get started .. code:: console $ git clone https://github.com/alisaifee/limits.git $ cd limits $ pip install -r requirements/dev.txt Since `limits` integrates with various backend storages, local development and running tests requires a working `docker & docker-compose installation `_. Running the tests will start the relevant containers automatically - but will leave them running so as to not incur the overhead of starting up on each test run. To run the tests: .. code:: console $ pytest Once you're done - you will probably want to clean up the docker containers: .. code:: console $ docker-compose down Projects using *limits* ======================= - `Flask-Limiter `_ : Rate limiting extension for Flask applications. - `djlimiter `_: Rate limiting middleware for Django applications. - `sanic-limiter `_: Rate limiting middleware for Sanic applications. - `Falcon-Limiter `_ : Rate limiting extension for Falcon applications. - `django-ratelimiter `_: Rate limiting decorator and middleware for Django applications. References ========== - `Redis: rate limiting pattern #2 `_ - `DomainTools: redis rate limiter `_ - `Cloudflare: How we built rate limiting capable of scaling to millions of domains `_ .. include:: ../../CONTRIBUTIONS.rst limits-4.4.1/doc/source/installation.rst000066400000000000000000000044721476517132700203450ustar00rootroot00000000000000============ Installation ============ Install the package with pip: .. code:: console $ pip install limits .. tab:: Redis .. code:: console $ pip install limits[redis] Includes .. literalinclude:: ../../requirements/storage/redis.txt .. tab:: RedisCluster .. code:: console $ pip install limits[rediscluster] Includes .. literalinclude:: ../../requirements/storage/rediscluster.txt .. tab:: Memcached .. code:: console $ pip install limits[memcached] Includes .. literalinclude:: ../../requirements/storage/memcached.txt .. tab:: MongoDB .. code:: console $ pip install limits[mongodb] Includes: .. literalinclude:: ../../requirements/storage/mongodb.txt .. tab:: Etcd .. code:: console $ pip install limits[etcd] Includes: .. literalinclude:: ../../requirements/storage/etcd.txt .. tab:: Valkey .. code:: console $ pip install limits[valkey] Includes: .. literalinclude:: ../../requirements/storage/valkey.txt More details around the specifics of each storage backend can be found in :ref:`storage` Async Storage ============= If you are using an async code base you can install the storage dependencies along with the package using the following extras: .. tab:: Redis .. code:: console $ pip install limits[async-redis] Includes: .. literalinclude:: ../../requirements/storage/async-redis.txt .. 
versionadded:: 4.2 :pypi:`redis`, if installed, can be used instead of :pypi:`coredis` by setting :paramref:`~limits.aio.storage.RedisStorage.implementation` to ``redispy``. See :class:`limits.aio.storage.RedisStorage` for more details. .. tab:: Memcached .. code:: console $ pip install limits[async-memcached] Includes: .. literalinclude:: ../../requirements/storage/async-memcached.txt .. tab:: MongoDB .. code:: console $ pip install limits[async-mongodb] Includes: .. literalinclude:: ../../requirements/storage/async-mongodb.txt .. tab:: Etcd .. code:: console $ pip install limits[async-etcd] Includes: .. literalinclude:: ../../requirements/storage/async-etcd.txt .. tab:: Valkey .. code:: console $ pip install limits[async-valkey] Includes: .. literalinclude:: ../../requirements/storage/async-valkey.txt limits-4.4.1/doc/source/quickstart.rst000066400000000000000000000105711476517132700200330ustar00rootroot00000000000000========== Quickstart ========== .. note:: In an async context, simply substitute the imports from :mod:`limits` to :mod:`limits.aio` and use ``await`` in the storage and limiter methods. Initialize the strategy & storage ================================= Initialize the storage backend ------------------------------ .. tab:: In Memory .. code:: from limits import storage limits_storage = storage.MemoryStorage() .. tab:: Memcached .. code:: from limits import storage limits_storage = storage.MemcachedStorage( "memcached://localhost:11211" ) .. tab:: Redis .. code:: from limits import storage limits_storage = storage.RedisStorage("redis://localhost:6379/1") Initialize a rate limiter -------------------------- .. tab:: With the Fixed window strategy .. code:: from limits import strategies limiter = strategies.FixedWindowRateLimiter(limits_storage) .. tab:: With the Moving window strategy .. caution:: If the storage used does not support the moving window strategy, :exc:`NotImplementedError` will be raised. .. code:: from limits import strategies limiter = strategies.MovingWindowRateLimiter(limits_storage) .. tab:: With the Sliding window counter strategy .. caution:: If the storage used does not support the sliding window counter strategy, :exc:`NotImplementedError` will be raised. .. code:: from limits import strategies limiter = strategies.SlidingWindowCounterRateLimiter(limits_storage) Describe the rate limit ======================= Initialize a rate limit using the :ref:`string notation <ratelimit-string>` ----------------------------------------------------------------------------------------------- .. code:: from limits import parse one_per_minute = parse("1/minute") Initialize a rate limit explicitly using a subclass of :class:`~limits.RateLimitItem` ------------------------------------------------------------------------------------- .. code:: from limits import RateLimitItemPerSecond one_per_second = RateLimitItemPerSecond(1, 1) Test the limits =============== Consume the limits ------------------ .. code:: assert True == limiter.hit(one_per_minute, "test_namespace", "foo") assert False == limiter.hit(one_per_minute, "test_namespace", "foo") assert True == limiter.hit(one_per_minute, "test_namespace", "bar") assert True == limiter.hit(one_per_second, "test_namespace", "foo") assert False == limiter.hit(one_per_second, "test_namespace", "foo") time.sleep(1) assert True == limiter.hit(one_per_second, "test_namespace", "foo") Check without consuming ----------------------- ..
code:: assert True == limiter.hit(one_per_second, "test_namespace", "foo") while not limiter.test(one_per_second, "test_namespace", "foo"): time.sleep(0.01) assert True == limiter.hit(one_per_second, "test_namespace", "foo") Query available capacity and reset time ----------------------------------------- .. code:: assert True == limiter.hit(one_per_minute, "test_namespace", "foo") window = limiter.get_window_stats(one_per_minute, "test_namespace", "foo") assert window.remaining == 0 assert False == limiter.hit(one_per_minute, "test_namespace", "foo") time.sleep(window.reset_time - time.time()) assert True == limiter.hit(one_per_minute, "test_namespace", "foo") Clear a limit ============= .. code:: assert True == limiter.hit(one_per_minute, "test_namespace", "foo") assert False == limiter.hit(one_per_minute, "test_namespace", "foo") limiter.clear(one_per_minute, "test_namespace", "foo") assert True == limiter.hit(one_per_minute, "test_namespace", "foo") .. _ratelimit-string: ========================== Rate limit string notation ========================== Instead of manually constructing instances of :class:`~limits.RateLimitItem` you can instead use the following :ref:`api:parsing functions`. - :func:`~limits.parse` - :func:`~limits.parse_many` These functions accept rate limits specified as strings following the format:: [count] [per|/] [n (optional)] [second|minute|hour|day|month|year] You can combine rate limits by separating them with a delimiter of your choice. Examples ======== * ``10 per hour`` * ``10/hour`` * ``10/hour;100/day;2000 per year`` * ``100/day, 500/7days`` limits-4.4.1/doc/source/storage.rst000066400000000000000000000230131476517132700173000ustar00rootroot00000000000000.. currentmodule:: limits .. _storage: ================ Storage Backends ================ Supported versions ================== .. image:: https://img.shields.io/github/actions/workflow/status/alisaifee/limits/compatibility.yml?logo=github&style=for-the-badge&labelColor=#282828 :alt: GitHub Workflow Status :target: https://github.com/alisaifee/limits/actions/workflows/compatibility.yml ----- **limits** is tested and known to work with the following versions of the dependency libraries and the associated storage versions. The CI tests against these versions on a nightly basis and you can see the results in `github `_. .. tab:: Redis Dependency versions: .. literalinclude:: ../../requirements/storage/redis.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-redis.txt .. note:: .. versionadded:: 4.2 :pypi:`redis` can be used instead of :pypi:`coredis` by setting :paramref:`limits.aio.storage.RedisStorage.implementation` to ``redispy`` `Redis `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SERVER_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" Redis with SSL .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SERVER_SSL_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" `Redis Sentinel `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SENTINEL_SERVER_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" .. tab:: Redis Cluster Dependency versions: .. literalinclude:: ../../requirements/storage/rediscluster.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-redis.txt .. note:: .. 
versionadded:: 4.2 :pypi:`redis` can be used instead of :pypi:`coredis` by setting :paramref:`limits.aio.storage.RedisClusterStorage.implementation` to ``redispy``. `Redis cluster `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SERVER_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" .. tab:: Memcached Dependency versions: .. literalinclude:: ../../requirements/storage/memcached.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-memcached.txt `Memcached `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_MEMCACHED_SERVER_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" .. tab:: MongoDB Dependency versions: .. literalinclude:: ../../requirements/storage/mongodb.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-mongodb.txt `MongoDB `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_MONGODB_SERVER_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" .. tab:: Etcd Dependency versions: .. literalinclude:: ../../requirements/storage/etcd.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-etcd.txt `Etcd `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_ETCD_SERVER_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" .. tab:: Valkey Dependency versions: .. literalinclude:: ../../requirements/storage/valkey.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-valkey.txt `Valkey `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_VALKEY_SERVER_VERSION=[\d\.]+' | cut -d = -f 2 | sort --version-sort | uniq" Storage scheme ============== **limits** uses a url style storage scheme notation (similar to the JDBC driver connection string notation) for configuring and initializing storage backends. This notation additionally provides a simple mechanism to both identify and configure the backend implementation based on a single string argument. The storage scheme follows the format :code:`{scheme}://{parameters}` :func:`limits.storage.storage_from_string` is provided to look up and construct an instance of a storage based on the storage scheme. For example:: import limits.storage uri = "redis://localhost:9999" options = {} redis_storage = limits.storage.storage_from_string(uri, **options) The additional `options` keyword arguments are passed as is to the constructor of the storage and handled differently by each implementation. Please refer to the API documentation in the :ref:`api:storage` section for details. Examples ======== In-Memory Storage ----------------- The in-memory storage (:class:`~limits.storage.MemoryStorage`) takes no parameters, so the only relevant value is :code:`memory://` Memcached Storage ----------------- Requires the location of the memcached server(s). As such, the parameter is a comma-separated list of :code:`{host}:{port}` locations such as :code:`memcached://localhost:11211` or :code:`memcached://localhost:11211,localhost:11212,192.168.1.1:11211`, or a path to a unix domain socket such as :code:`memcached:///var/tmp/path/to/sock` Depends on: :pypi:`pymemcache` Redis Storage ------------- Requires the location of the redis server and optionally the database number. :code:`redis://localhost:6379` or :code:`redis://localhost:6379/n` (for database `n`).
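A minimal end-to-end construction (a sketch that assumes a redis server is reachable on the default port) could look like::

    from limits.storage import storage_from_string

    # select logical database 1 on a local redis server
    redis_storage = storage_from_string("redis://localhost:6379/1")
    # verify connectivity to the backend
    assert redis_storage.check()
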
If the redis server is listening over a unix domain socket, you can use :code:`redis+unix:///path/to/sock` or :code:`redis+unix:///path/to/socket?db=n` (for database `n`). If the database is password protected, the password can be provided in the url, for example :code:`redis://:foobared@localhost:6379` or :code:`redis+unix://:foobared@/path/to/socket` if using a UDS. For scenarios where a redis connection pool is already available and can be reused, it can be provided in :paramref:`~limits.storage.storage_from_string.options`, for example:: pool = redis.connection.BlockingConnectionPool.from_url("redis://.....") storage_from_string("redis://", connection_pool=pool) Depends on: :pypi:`redis` .. versionadded:: 4.3 If the database ``uri`` scheme uses ``valkey`` instead of ``redis``, the implementation used will be from :pypi:`valkey` instead of :pypi:`redis`. Redis+SSL Storage ----------------- The official Redis client :pypi:`redis` supports redis connections over SSL with the ``rediss://`` scheme. You can add SSL related parameters in the url itself, for example: :code:`rediss://localhost:6379/0?ssl_ca_certs=./tls/ca.crt&ssl_keyfile=./tls/client.key`. Depends on: :pypi:`redis` Redis+Sentinel Storage ---------------------- Requires the location(s) of the redis sentinel instances and the `service-name` that is monitored by the sentinels. :code:`redis+sentinel://localhost:26379/my-redis-service` or :code:`redis+sentinel://localhost:26379,localhost:26380/my-redis-service`. If the sentinel is password protected, the username and/or password can be provided in the url, for example :code:`redis+sentinel://:sekret@localhost:26379/my-redis-service`. When authentication details are provided in the url, they will be used both for the sentinel and as connection arguments for the underlying redis nodes managed by the sentinel. If you need fine-grained control, it is recommended to use the additional :paramref:`~limits.storage.storage_from_string.options` arguments. More details can be found in the API documentation for :class:`~limits.storage.RedisSentinelStorage` (or the async version: :class:`~limits.aio.storage.RedisSentinelStorage`). Depends on: :pypi:`redis` Redis Cluster Storage --------------------- Requires the location(s) of the redis cluster startup nodes (one is enough): :code:`redis+cluster://localhost:7000` or :code:`redis+cluster://localhost:7000,localhost:7001`. If the cluster is password protected, the username and/or password can be provided in the url, for example :code:`redis+cluster://:sekret@localhost:7000,localhost:7001` Depends on: :pypi:`redis` MongoDB Storage --------------- Requires the location(s) of a mongodb installation using the uri schema described by the `Mongodb URI Specification `_ Examples: - Local instance: ``mongodb://localhost:27017/`` - Instance with SSL: ``mongodb://mymongo.com/?tls=true`` - Local instance with SSL & self signed/invalid certificate: ``mongodb://localhost:27017/?tls=true&tlsAllowInvalidCertificates=true`` Depends on: :pypi:`pymongo` Etcd Storage ------------ Requires the location of an etcd node. Example: ``etcd://localhost:2379`` Depends on: :pypi:`etcd3` Async Storage ============= .. versionadded:: 2.1 When using limits in an async code base, the same uri schema can be used to query for an async implementation of the storage by prefixing the scheme with ``async+``.
For example: - ``async+redis://localhost:6379/0`` - ``async+rediss://localhost:6379/0`` - ``async+redis+cluster://localhost:7000,localhost:7001`` - ``async+redis+sentinel://:sekret@localhost:26379/my-redis-service`` - ``async+memcached://localhost:11211`` - ``async+etcd://localhost:2379`` - ``async+memory://`` For implementation details of the currently supported async backends, refer to :ref:`api:async storage`. limits-4.4.1/doc/source/strategies.rst000066400000000000000000000123551476517132700200150ustar00rootroot00000000000000======================== Rate Limiting Strategies ======================== TL;DR: How to choose a strategy =============================== - **Fixed Window:** Use when low memory usage and high performance are critical, and occasional bursts are acceptable or can be mitigated by additional fine-grained limits. - **Moving Window:** Use when exactly accurate rate limiting is required and extra memory overhead is acceptable. - **Sliding Window Counter:** Use when a balance between memory efficiency and accuracy is needed. This strategy smooths transitions between time periods with less overhead than a full moving window, though it may trade off some precision near bucket boundaries. Fixed Window ============ This strategy is the most memory‑efficient because it uses a single counter per resource and rate limit. When the first request arrives, a window is started for a fixed duration (e.g., for a rate limit of 10 requests per minute the window expires 60 seconds after the first request). All requests in that window increment the counter, and when the window expires, the counter resets. Burst traffic that exceeds the intended rate may occur at window boundaries. For example, with a rate limit of 10 requests per minute: - At **00:00:45**, the first request arrives, starting a window from **00:00:45** to **00:01:45**. - All requests between **00:00:45** and **00:01:45** count toward the limit. - If 10 requests occur at any time in that window, any further request before **00:01:45** is rejected. - At **00:01:45**, the counter resets and a new window starts, which would allow 10 requests until **00:02:45**. .. tip:: To mitigate burstiness (e.g., many requests at window edges), combine limits that have large windows with finer-granularity ones (e.g., combine a 2 requests per second limit with a 10 requests per minute limit). Fixed Window with Elastic Expiry ================================== .. deprecated:: 4.1 This variant extends the window’s expiry with each hit by resetting the timer on every request. Although designed to impose larger penalties for breaches, it is now deprecated and should not be used. Moving Window ============= This strategy adds each request’s timestamp to a log if the ``nth`` oldest entry (where ``n`` is the limit) is either not present or is older than the duration of the window (for example, with a rate limit of ``10 requests per minute``, a new entry is accepted if there are either fewer than 10 entries or if the 10th oldest entry is at least 60 seconds old). Upon adding a new entry to the log, "expired" entries are truncated. For example, with a rate limit of 10 requests per minute: - At **00:00:10**, the client sends 1 request, which is allowed. - At **00:00:20**, the client sends 2 requests, which are allowed. - At **00:00:30**, the client sends 4 requests, which are allowed. - At **00:00:50**, the client sends 3 requests, which are allowed (total = 10). - At **00:01:11**, the client sends 1 request.
The strategy checks the timestamp of the 10th oldest entry (**00:00:10**) which is now 61 seconds old and thus expired. The request is allowed. - At **00:01:12**, the client sends 1 request. The 10th oldest entry's timestamp is **00:00:20** which is only 52 seconds old. The request is rejected. Sliding Window Counter ======================= .. versionadded:: 4.1 This strategy approximates the moving window while using less memory by maintaining two counters: - **Current bucket:** counts requests in the ongoing period. - **Previous bucket:** counts requests in the immediately preceding period. A weighted sum of these counters is computed based on the elapsed time in the current bucket. The weighted count is defined as: .. math:: C_{\text{weighted}} = \left\lfloor C_{\text{current}} + \left(C_{\text{prev}} \times w\right) \right\rfloor and the weight factor :math:`w` is calculated as: .. math:: w = \frac{T_{\text{exp}} - T_{\text{elapsed}}}{T_{\text{exp}}} Where: - :math:`T_{\text{exp}}` is the bucket duration. - :math:`T_{\text{elapsed}}` is the time elapsed since the bucket shifted. - :math:`C_{\text{prev}}` is the previous bucket's count. - :math:`C_{\text{current}}` is the current bucket's count. For example, with a rate limit of ``100 requests per minute`` Suppose: - Current bucket has 80 hits (:math:`C_{\text{current}}`) - Previous bucket has 40 hits (:math:`C_{\text{prev}}`) - If the bucket shifted 30 seconds ago (:math:`T_{\text{elapsed}} = 30`). .. math:: w = \frac{60 - 30}{60} = 0.5 .. math:: C_{\text{weighted}} = \left\lfloor 80 + (0.5 \times 40) \right\rfloor = 100 Since the effective count equals the limit, a new request is rejected. - If the bucket shifted 40 seconds ago (:math:`T_{\text{elapsed}} = 40`). .. math:: w = \frac{60 - 40}{60} \approx 0.33 .. math:: C_{\text{weighted}} = \left\lfloor 80 + (0.33 \times 40) \right\rfloor = 93 Since the effective count is below the limit, a new request is allowed. .. note:: Some storage implementations use fixed bucket boundaries (e.g., aligning buckets with clock intervals), while others adjust buckets dynamically based on the first hit. This difference can allow an attacker to bypass limits during the initial sampling period. The affected implementations are ``memcached`` and ``in-memory``. 
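The weighted-count arithmetic above can be sketched in a few lines of Python (a standalone illustration of the formula, not part of the library API)::

    import math

    def weighted_count(current: int, previous: int, elapsed: float, period: float = 60.0) -> int:
        # w = (T_exp - T_elapsed) / T_exp
        weight = (period - elapsed) / period
        # C_weighted = floor(C_current + C_prev * w)
        return math.floor(current + previous * weight)

    # the two scenarios from the example above
    assert weighted_count(80, 40, elapsed=30) == 100  # at the limit: rejected
    assert weighted_count(80, 40, elapsed=40) == 93   # below the limit: allowed
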
limits-4.4.1/doc/source/theme_config.py000066400000000000000000000026601476517132700201100ustar00rootroot00000000000000colors = { "bg0": " #fbf1c7", "bg1": " #ebdbb2", "bg2": " #d5c4a1", "bg3": " #bdae93", "bg4": " #a89984", "gry": " #928374", "fg4": " #7c6f64", "fg3": " #665c54", "fg2": " #504945", "fg1": " #3c3836", "fg0": " #282828", "red": " #cc241d", "red2": " #9d0006", "orange": " #d65d0e", "orange2": " #af3a03", "yellow": " #d79921", "yellow2": " #b57614", "green": " #98971a", "green2": " #79740e", "aqua": " #689d6a", "aqua2": " #427b58", "blue": " #458588", "blue2": " #076678", "purple": " #b16286", "purple2": " #8f3f71", } html_theme = "furo" html_theme_options = { "light_css_variables": { "font-stack": "Fira Sans, sans-serif", "font-stack--monospace": "Fira Code, monospace", "color-brand-primary": colors["purple2"], "color-brand-content": colors["blue2"], }, "dark_css_variables": { "color-brand-primary": colors["purple"], "color-brand-content": colors["blue"], "color-background-primary": colors["fg1"], "color-background-secondary": colors["fg0"], "color-foreground-primary": colors["bg0"], "color-foreground-secondary": colors["bg1"], "color-highlighted-background": colors["yellow"], "color-highlight-on-target": colors["fg2"], }, } highlight_language = "python3" pygments_style = "gruvbox-light" pygments_dark_style = "gruvbox-dark" limits-4.4.1/docker-compose.yml000066400000000000000000000275271476517132700165100ustar00rootroot00000000000000version: '3.2' services: # etcd etcd: image: "bitnami/etcd:${LIMITS_ETCD_SERVER_VERSION:-latest}" environment: - ALLOW_NONE_AUTHENTICATION=yes - ETCD_ADVERTISE_CLIENT_URLS=http://etcd:2379 - ETCD_ELECTION_TIMEOUT=600 ports: - 2379:2379 - 2380:2380 # memcached memcached-1: image: "memcached:${LIMITS_MEMCACHED_SERVER_VERSION:-latest}" command: -p 22122 ports: - 22122:22122 memcached-2: image: "memcached:${LIMITS_MEMCACHED_SERVER_VERSION:-latest}" command: -p 22123 ports: - 22123:22123 memcached-uds: image: "memcached:${LIMITS_MEMCACHED_SERVER_VERSION:-latest}" command: sh -c "test ${HOST_OS} = \"Darwin\" && exit || memcached -s /sockets/limits.memcached.sock -a 777" volumes: - type: bind source: /tmp/ target: /sockets/ # redis sentinel redis-sentinel-master: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" command: redis-server --port 6380 ports: - '6380:6380' redis-sentinel-slave: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" depends_on: [redis-sentinel-master] command: redis-server --port 6381 --slaveof ${HOST_IP} 6380 --slave-announce-ip ${HOST_IP} ports: - '6381:6381' redis-sentinel-master-auth: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" command: redis-server --port 6382 --requirepass sekret ports: - '6382:6382' redis-sentinel-slave-auth: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" depends_on: [redis-sentinel-master-auth] command: redis-server --port 6383 --slaveof ${HOST_IP} 6382 --slave-announce-ip ${HOST_IP} --requirepass sekret --masterauth sekret ports: - '6383:6383' redis-sentinel: image: 'bitnami/redis-sentinel:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}' depends_on: [redis-sentinel-slave] environment: - REDIS_MASTER_HOST=${HOST_IP} - REDIS_MASTER_PORT_NUMBER=6380 ports: - '26379:26379' redis-sentinel-auth: image: 'bitnami/redis-sentinel:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}' depends_on: [redis-sentinel-slave-auth] environment: - REDIS_MASTER_HOST=${HOST_IP} - REDIS_MASTER_PASSWORD=sekret - REDIS_MASTER_PORT_NUMBER=6382 - REDIS_SENTINEL_PASSWORD=sekret 
ports: - '36379:26379' # cluster redis-cluster-1: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7001 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7001:7001' - '17001:17001' redis-cluster-2: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7002 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7002:7002' - '17002:17002' redis-cluster-3: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7003 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7003:7003' - '17003:17003' redis-cluster-4: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7004 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7004:7004' - '17004:17004' redis-cluster-5: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7005 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7005:7005' - '17005:17005' redis-cluster-6: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7006 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7006:7006' - '17006:17006' redis-cluster-init: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: bash -c "echo yes | redis-cli --cluster create --cluster-replicas 1 ${HOST_IP}:7001 ${HOST_IP}:7002 ${HOST_IP}:7003 ${HOST_IP}:7004 ${HOST_IP}:7005 ${HOST_IP}:7006" depends_on: [redis-cluster-1, redis-cluster-2, redis-cluster-3, redis-cluster-4, redis-cluster-5, redis-cluster-6] redis-ssl-cluster-1: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8301 --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8301:8301' - '18301:18301' volumes: - ./tests/tls:/tls redis-ssl-cluster-2: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8302 --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8302:8302' - '18302:18302' volumes: - ./tests/tls:/tls redis-ssl-cluster-3: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8303 --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8303:8303' - '18303:18303' volumes: - ./tests/tls:/tls redis-ssl-cluster-4: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8304 --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8304:8304' - '18304:18304' volumes: - ./tests/tls:/tls redis-ssl-cluster-5: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: 
redis-server --port 0 --tls-port 8305 --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8305:8305' - '18305:18305' volumes: - ./tests/tls:/tls redis-ssl-cluster-6: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8306 --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8306:8306' - '18306:18306' volumes: - ./tests/tls:/tls redis-ssl-cluster-init: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: bash -c "echo yes | redis-cli --cluster create --cluster-replicas 1 ${HOST_IP}:8301 ${HOST_IP}:8302 ${HOST_IP}:8303 ${HOST_IP}:8304 ${HOST_IP}:8305 ${HOST_IP}:8306 --tls --cert /tls/redis.crt --key /tls/redis.key --cacert /tls/ca.crt " depends_on: [redis-ssl-cluster-1, redis-ssl-cluster-2, redis-ssl-cluster-3, redis-ssl-cluster-4, redis-ssl-cluster-5, redis-ssl-cluster-6] volumes: - ./tests/tls:/tls redis-cluster-auth-1: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 8400 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --requirepass sekret ports: - '8400:8400' - '18400:18400' redis-cluster-auth-2: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 8401 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --requirepass sekret ports: - '8401:8401' - '18401:18401' redis-cluster-auth-3: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 8402 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --requirepass sekret ports: - '8402:8402' - '18402:18402' redis-cluster-auth-init: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: bash -c "echo yes | redis-cli --cluster create --cluster-replicas 0 ${HOST_IP}:8400 ${HOST_IP}:8401 ${HOST_IP}:8402 -a sekret" depends_on: [redis-cluster-auth-1, redis-cluster-auth-2, redis-cluster-auth-3] redis-basic: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7379 ports: - '7379:7379' redis-auth: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7389 --requirepass sekret ports: - '7389:7389' redis-ssl: image: "redis:${LIMITS_REDIS_SERVER_SSL_VERSION:-latest}" command: redis-server --port 0 --tls-port 8379 --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8379:8379' volumes: - ./tests/tls:/tls redis-uds: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: sh -c "test ${HOST_OS} = \"Darwin\" && exit || redis-server --port 0 --unixsocket /sockets/limits.redis.sock --unixsocketperm 777" volumes: - type: bind source: /tmp/ target: /sockets/ mongodb: image: "mongo:${LIMITS_MONGODB_SERVER_VERSION:-latest}" ports: - '37017:27017' dragonfly: image: "docker.dragonflydb.io/dragonflydb/dragonfly:latest" ports: - '21379:6379' ulimits: memlock: -1 valkey-basic: image: "valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: valkey-server --port 6379 ${DEFAULT_ARGS---enable-debug-command yes} --appendonly yes ports: - '12379:6379' valkey-cluster-1: image: 
"valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: valkey-server --port 2001 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '2001:2001' - '12001:12001' valkey-cluster-2: image: "valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: valkey-server --port 2002 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '2002:2002' - '12002:12002' valkey-cluster-3: image: "valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: valkey-server --port 2003 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '2003:2003' - '12003:12003' valkey-cluster-4: image: "valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: valkey-server --port 2004 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '2004:2004' - '12004:12004' valkey-cluster-5: image: "valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: valkey-server --port 2005 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '2005:2005' - '12005:12005' valkey-cluster-6: image: "valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: valkey-server --port 2006 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '2006:2006' - '12006:12006' valkey-cluster-init: image: "valkey/valkey:${LIMITS_VALKEY_SERVER_VERSION:-latest}" command: bash -c "echo yes | valkey-cli --cluster create --cluster-replicas 1 ${HOST_IP}:2001 ${HOST_IP}:2002 ${HOST_IP}:2003 ${HOST_IP}:2004 ${HOST_IP}:2005 ${HOST_IP}:2006" depends_on: [valkey-cluster-1, valkey-cluster-2, valkey-cluster-3, valkey-cluster-4, valkey-cluster-5, valkey-cluster-6] limits-4.4.1/limits/000077500000000000000000000000001476517132700143375ustar00rootroot00000000000000limits-4.4.1/limits/__init__.py000066400000000000000000000013541476517132700164530ustar00rootroot00000000000000""" Rate limiting with commonly used storage backends """ from __future__ import annotations from . import _version, aio, storage, strategies from .limits import ( RateLimitItem, RateLimitItemPerDay, RateLimitItemPerHour, RateLimitItemPerMinute, RateLimitItemPerMonth, RateLimitItemPerSecond, RateLimitItemPerYear, ) from .util import WindowStats, parse, parse_many __all__ = [ "RateLimitItem", "RateLimitItemPerDay", "RateLimitItemPerHour", "RateLimitItemPerMinute", "RateLimitItemPerMonth", "RateLimitItemPerSecond", "RateLimitItemPerYear", "WindowStats", "aio", "parse", "parse_many", "storage", "strategies", ] __version__ = _version.get_versions()["version"] limits-4.4.1/limits/_version.py000066400000000000000000000601241476517132700165400ustar00rootroot00000000000000# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. 
# Generated by versioneer-0.29 # https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" import errno import functools import os import re import subprocess import sys from typing import Any, Callable, Dict, List, Optional, Tuple def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = " (tag: 4.4.1, stable, 4.x)" git_full = "a9d5a1a8a9a7dd9686861117364a3d4cd786cd5b" git_date = "2025-03-14 20:24:07 -0700" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" VCS: str style: str tag_prefix: str parentdir_prefix: str versionfile_source: str verbose: bool def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440-pre" cfg.tag_prefix = "" cfg.parentdir_prefix = "limits-" cfg.versionfile_source = "limits/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command( commands: List[str], args: List[str], cwd: Optional[str] = None, verbose: bool = False, hide_stderr: bool = False, env: Optional[Dict[str, str]] = None, ) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs: Dict[str, Any] = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen( [command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs, ) break except OSError as e: if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir( parentdir_prefix: str, root: str, verbose: bool, ) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords: Dict[str, str] = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords( keywords: Dict[str, str], tag_prefix: str, verbose: bool, ) -> Dict[str, Any]: """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r"\d", r): continue if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs( tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command ) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner( GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*", ], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces: Dict[str, Any]) -> str: """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). 
""" vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces: Dict[str, Any]) -> str: """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%d" % (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), } def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None, } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None, } limits-4.4.1/limits/aio/000077500000000000000000000000001476517132700151075ustar00rootroot00000000000000limits-4.4.1/limits/aio/__init__.py000066400000000000000000000001661476517132700172230ustar00rootroot00000000000000from __future__ import annotations from . 
import storage, strategies __all__ = [ "storage", "strategies", ] limits-4.4.1/limits/aio/storage/000077500000000000000000000000001476517132700165535ustar00rootroot00000000000000limits-4.4.1/limits/aio/storage/__init__.py000066400000000000000000000012671476517132700206720ustar00rootroot00000000000000""" Implementations of storage backends to be used with :class:`limits.aio.strategies.RateLimiter` strategies """ from __future__ import annotations from .base import MovingWindowSupport, SlidingWindowCounterSupport, Storage from .etcd import EtcdStorage from .memcached import MemcachedStorage from .memory import MemoryStorage from .mongodb import MongoDBStorage from .redis import RedisClusterStorage, RedisSentinelStorage, RedisStorage __all__ = [ "EtcdStorage", "MemcachedStorage", "MemoryStorage", "MongoDBStorage", "MovingWindowSupport", "RedisClusterStorage", "RedisSentinelStorage", "RedisStorage", "SlidingWindowCounterSupport", "Storage", ] limits-4.4.1/limits/aio/storage/base.py000066400000000000000000000144471476517132700200510ustar00rootroot00000000000000from __future__ import annotations import functools from abc import ABC, abstractmethod from deprecated.sphinx import versionadded from limits import errors from limits.storage.registry import StorageRegistry from limits.typing import ( Any, Awaitable, Callable, P, R, cast, ) from limits.util import LazyDependency def _wrap_errors( fn: Callable[P, Awaitable[R]], ) -> Callable[P, Awaitable[R]]: @functools.wraps(fn) async def inner(*args: P.args, **kwargs: P.kwargs) -> R: # type: ignore[misc] instance = cast(Storage, args[0]) try: return await fn(*args, **kwargs) except instance.base_exceptions as exc: if instance.wrap_exceptions: raise errors.StorageError(exc) from exc raise return inner @versionadded(version="2.1") class Storage(LazyDependency, metaclass=StorageRegistry): """ Base class to extend when implementing an async storage backend. """ STORAGE_SCHEME: list[str] | None """The storage schemes to register against this implementation""" def __init_subclass__(cls, **kwargs: Any) -> None: # type:ignore[explicit-any] super().__init_subclass__(**kwargs) for method in { "incr", "get", "get_expiry", "check", "reset", "clear", }: setattr(cls, method, _wrap_errors(getattr(cls, method))) super().__init_subclass__(**kwargs) def __init__( self, uri: str | None = None, wrap_exceptions: bool = False, **options: float | str | bool, ) -> None: """ :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. """ super().__init__() self.wrap_exceptions = wrap_exceptions @property @abstractmethod def base_exceptions(self) -> type[Exception] | tuple[type[Exception], ...]: raise NotImplementedError @abstractmethod async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. 
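When enabled, every hit pushes the window end out to ``expiry`` seconds from the time of the hit, so a key that is hit steadily never expires.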
:param amount: the number to increment by """ raise NotImplementedError @abstractmethod async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ raise NotImplementedError @abstractmethod async def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ raise NotImplementedError @abstractmethod async def check(self) -> bool: """ check if storage is healthy """ raise NotImplementedError @abstractmethod async def reset(self) -> int | None: """ reset storage to clear limits """ raise NotImplementedError @abstractmethod async def clear(self, key: str) -> None: """ resets the rate limit key :param key: the key to clear rate limits for """ raise NotImplementedError class MovingWindowSupport(ABC): """ Abstract base class for async storages that support the :ref:`strategies:moving window` strategy """ def __init_subclass__(cls, **kwargs: Any) -> None: # type: ignore[explicit-any] for method in { "acquire_entry", "get_moving_window", }: setattr( cls, method, _wrap_errors(getattr(cls, method)), ) super().__init_subclass__(**kwargs) @abstractmethod async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ raise NotImplementedError @abstractmethod async def get_moving_window( self, key: str, limit: int, expiry: int ) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ raise NotImplementedError class SlidingWindowCounterSupport(ABC): """ Abstract base class for async storages that support the :ref:`strategies:sliding window counter` strategy """ def __init_subclass__(cls, **kwargs: Any) -> None: # type: ignore[explicit-any] for method in {"acquire_sliding_window_entry", "get_sliding_window"}: setattr( cls, method, _wrap_errors(getattr(cls, method)), ) super().__init_subclass__(**kwargs) @abstractmethod async def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: """ Acquire an entry if the weighted count of the current and previous windows is less than or equal to the limit :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ raise NotImplementedError @abstractmethod async def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: """ Return the previous and current window information. 
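Implementations weight the previous window by the fraction of it that still overlaps the sliding window: ``weighted = previous_count * previous_ttl / expiry + current_count``.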
:param key: the rate limit key :param expiry: the rate limit expiry, needed to compute the key in some implementations :return: a tuple of (int, float, int, float) with the following information: - previous window counter - previous window TTL - current window counter - current window TTL """ raise NotImplementedError limits-4.4.1/limits/aio/storage/etcd.py000066400000000000000000000117311476517132700200470ustar00rootroot00000000000000from __future__ import annotations import asyncio import time import urllib.parse from typing import TYPE_CHECKING from deprecated.sphinx import deprecated from limits.aio.storage.base import Storage from limits.errors import ConcurrentUpdateError if TYPE_CHECKING: import aetcd @deprecated(version="4.4") class EtcdStorage(Storage): """ Rate limit storage with etcd as backend. Depends on :pypi:`aetcd`. """ STORAGE_SCHEME = ["async+etcd"] """The async storage scheme for etcd""" DEPENDENCIES = ["aetcd"] PREFIX = "limits" MAX_RETRIES = 5 def __init__( self, uri: str, max_retries: int = MAX_RETRIES, wrap_exceptions: bool = False, **options: str, ) -> None: """ :param uri: etcd location of the form ``async+etcd://host:port``, :param max_retries: Maximum number of attempts to retry in the case of concurrent updates to a rate limit key :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`aetcd.client.Client` :raise ConfigurationError: when :pypi:`aetcd` is not available """ parsed = urllib.parse.urlparse(uri) self.lib = self.dependencies["aetcd"].module self.storage: aetcd.Client = self.lib.Client( host=parsed.hostname, port=parsed.port, **options ) self.max_retries = max_retries super().__init__(uri, wrap_exceptions=wrap_exceptions) @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return self.lib.ClientError # type: ignore[no-any-return] def prefixed_key(self, key: str) -> bytes: return f"{self.PREFIX}/{key}".encode() async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: retries = 0 etcd_key = self.prefixed_key(key) while retries < self.max_retries: now = time.time() lease = await self.storage.lease(expiry) window_end = now + expiry create_attempt = await self.storage.transaction( compare=[self.storage.transactions.create(etcd_key) == b"0"], success=[ self.storage.transactions.put( etcd_key, f"{amount}:{window_end}".encode(), lease=lease.id ) ], failure=[self.storage.transactions.get(etcd_key)], ) if create_attempt[0]: return amount else: cur = create_attempt[1][0][0][1] cur_value, window_end = cur.value.split(b":") window_end = float(window_end) if window_end <= now: await asyncio.gather( self.storage.revoke_lease(cur.lease), self.storage.delete(etcd_key), ) else: if elastic_expiry: await self.storage.refresh_lease(cur.lease) window_end = now + expiry new = int(cur_value) + amount if ( await self.storage.transaction( compare=[ self.storage.transactions.value(etcd_key) == cur.value ], success=[ self.storage.transactions.put( etcd_key, f"{new}:{window_end}".encode(), lease=cur.lease, ) ], failure=[], ) )[0]: return new retries += 1 raise ConcurrentUpdateError(key, retries) async def get(self, key: str) -> int: cur = await self.storage.get(self.prefixed_key(key)) if cur: amount, expiry = cur.value.split(b":") if float(expiry) > time.time(): return int(amount) return 0 async def get_expiry(self, key: str) -> 
float: cur = await self.storage.get(self.prefixed_key(key)) if cur: window_end = float(cur.value.split(b":")[1]) return window_end return time.time() async def check(self) -> bool: try: await self.storage.status() return True except: # noqa return False async def reset(self) -> int | None: return (await self.storage.delete_prefix(f"{self.PREFIX}/".encode())).deleted async def clear(self, key: str) -> None: await self.storage.delete(self.prefixed_key(key)) limits-4.4.1/limits/aio/storage/memcached.py000066400000000000000000000241711476517132700210400ustar00rootroot00000000000000from __future__ import annotations import time import urllib.parse from collections.abc import Iterable from math import ceil, floor from deprecated.sphinx import versionadded from limits.aio.storage.base import SlidingWindowCounterSupport, Storage from limits.storage.base import TimestampedSlidingWindow from limits.typing import EmcacheClientP, ItemP @versionadded(version="2.1") class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingWindow): """ Rate limit storage with memcached as backend. Depends on :pypi:`emcache` """ STORAGE_SCHEME = ["async+memcached"] """The storage scheme for memcached to be used in an async context""" DEPENDENCIES = ["emcache"] def __init__( self, uri: str, wrap_exceptions: bool = False, **options: float | str | bool, ) -> None: """ :param uri: memcached location of the form ``async+memcached://host:port,host:port`` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`emcache.Client` :raise ConfigurationError: when :pypi:`emcache` is not available """ parsed = urllib.parse.urlparse(uri) self.hosts = [] for host, port in ( loc.split(":") for loc in parsed.netloc.strip().split(",") if loc.strip() ): self.hosts.append((host, int(port))) self._options = options self._storage = None super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self.dependency = self.dependencies["emcache"].module @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return ( self.dependency.ClusterNoAvailableNodes, self.dependency.CommandError, ) async def get_storage(self) -> EmcacheClientP: if not self._storage: self._storage = await self.dependency.create_client( [self.dependency.MemcachedHostAddress(h, p) for h, p in self.hosts], **self._options, ) assert self._storage return self._storage async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ item = await (await self.get_storage()).get(key.encode("utf-8")) return item and int(item.value) or 0 async def get_many(self, keys: Iterable[str]) -> dict[bytes, ItemP]: """ Return multiple counters at once :param keys: the keys to get the counter values for """ return await (await self.get_storage()).get_many( [k.encode("utf-8") for k in keys] ) async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ await (await self.get_storage()).delete(key.encode("utf-8")) async def decr(self, key: str, amount: int = 1, noreply: bool = False) -> int: """ decrements the counter for a given rate limit key retursn 0 if the key doesn't exist or if noreply is set to True :param key: the key to decrement :param amount: the number to decrement by :param noreply: set to True to ignore the memcached response """ storage = await self.get_storage() limit_key = 
key.encode("utf-8") try: value = await storage.decrement(limit_key, amount, noreply=noreply) or 0 except self.dependency.NotFoundCommandError: value = 0 return value async def incr( self, key: str, expiry: float, elastic_expiry: bool = False, amount: int = 1, set_expiration_key: bool = True, ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. :param amount: the number to increment by :param set_expiration_key: if set to False, the expiration time won't be stored but the key will still expire """ storage = await self.get_storage() limit_key = key.encode("utf-8") expire_key = self._expiration_key(key).encode() value = None try: value = await storage.increment(limit_key, amount) or amount if elastic_expiry: await storage.touch(limit_key, exptime=ceil(expiry)) if set_expiration_key: await storage.set( expire_key, str(expiry + time.time()).encode("utf-8"), exptime=ceil(expiry), noreply=False, ) return value except self.dependency.NotFoundCommandError: # Incrementation failed because the key doesn't exist storage = await self.get_storage() try: await storage.add(limit_key, f"{amount}".encode(), exptime=ceil(expiry)) if set_expiration_key: await storage.set( expire_key, str(expiry + time.time()).encode("utf-8"), exptime=ceil(expiry), noreply=False, ) value = amount except self.dependency.NotStoredStorageCommandError: # Coult not add the key, probably because a concurrent call has added it storage = await self.get_storage() value = await storage.increment(limit_key, amount) or amount if elastic_expiry: await storage.touch(limit_key, exptime=ceil(expiry)) if set_expiration_key: await storage.set( expire_key, str(expiry + time.time()).encode("utf-8"), exptime=ceil(expiry), noreply=False, ) return value async def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ storage = await self.get_storage() item = await storage.get(self._expiration_key(key).encode("utf-8")) return item and float(item.value) or time.time() def _expiration_key(self, key: str) -> str: """ Return the expiration key for the given counter key. Memcached doesn't natively return the expiration time or TTL for a given key, so we implement the expiration time on a separate key. """ return key + "/expires" async def check(self) -> bool: """ Check if storage is healthy by calling the ``get`` command on the key ``limiter-check`` """ try: storage = await self.get_storage() await storage.get(b"limiter-check") return True except: # noqa return False async def reset(self) -> int | None: raise NotImplementedError async def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: if amount > limit: return False now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) ( previous_count, previous_ttl, current_count, _, ) = await self._get_sliding_window_info(previous_key, current_key, expiry, now) t0 = time.time() weighted_count = previous_count * previous_ttl / expiry + current_count if floor(weighted_count) + amount > limit: return False else: # Hit, increase the current counter. # If the counter doesn't exist yet, set twice the theorical expiry. # We don't need the expiration key as it is estimated with the timestamps directly. 
current_count = await self.incr( current_key, 2 * expiry, amount=amount, set_expiration_key=False ) t1 = time.time() actualised_previous_ttl = max(0, previous_ttl - (t1 - t0)) weighted_count = ( previous_count * actualised_previous_ttl / expiry + current_count ) if floor(weighted_count) > limit: # Another hit won the race condition: revert the incrementation and refuse this hit # Limitation: during high concurrency at the end of the window, # the counter is shifted and cannot be decremented, so less requests than expected are allowed. await self.decr(current_key, amount, noreply=True) return False return True async def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) return await self._get_sliding_window_info( previous_key, current_key, expiry, now ) async def _get_sliding_window_info( self, previous_key: str, current_key: str, expiry: int, now: float ) -> tuple[int, float, int, float]: result = await self.get_many([previous_key, current_key]) raw_previous_count = result.get(previous_key.encode("utf-8")) raw_current_count = result.get(current_key.encode("utf-8")) current_count = raw_current_count and int(raw_current_count.value) or 0 previous_count = raw_previous_count and int(raw_previous_count.value) or 0 if previous_count == 0: previous_ttl = float(0) else: previous_ttl = (1 - (((now - expiry) / expiry) % 1)) * expiry current_ttl = (1 - ((now / expiry) % 1)) * expiry + expiry return previous_count, previous_ttl, current_count, current_ttl limits-4.4.1/limits/aio/storage/memory.py000066400000000000000000000230371476517132700204420ustar00rootroot00000000000000from __future__ import annotations import asyncio import time from collections import Counter, defaultdict from math import floor from deprecated.sphinx import versionadded import limits.typing from limits.aio.storage.base import ( MovingWindowSupport, SlidingWindowCounterSupport, Storage, ) from limits.storage.base import TimestampedSlidingWindow class Entry: def __init__(self, expiry: int) -> None: self.atime = time.time() self.expiry = self.atime + expiry @versionadded(version="2.1") class MemoryStorage( Storage, MovingWindowSupport, SlidingWindowCounterSupport, TimestampedSlidingWindow ): """ rate limit storage using :class:`collections.Counter` as an in memory storage for fixed and elastic window strategies, and a simple list to implement moving window strategy. 
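Illustrative usage (requires a running event loop; the key is an example)::

    from limits.aio.storage import MemoryStorage

    storage = MemoryStorage()  # same as the "async+memory://" scheme
    await storage.incr("user:42", expiry=60)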
""" STORAGE_SCHEME = ["async+memory"] """ The storage scheme for in process memory storage for use in an async context """ def __init__( self, uri: str | None = None, wrap_exceptions: bool = False, **_: str ) -> None: self.storage: limits.typing.Counter[str] = Counter() self.locks: defaultdict[str, asyncio.Lock] = defaultdict(asyncio.Lock) self.expirations: dict[str, float] = {} self.events: dict[str, list[Entry]] = {} self.timer: asyncio.Task[None] | None = None super().__init__(uri, wrap_exceptions=wrap_exceptions, **_) def __getstate__(self) -> dict[str, limits.typing.Any]: # type: ignore[explicit-any] state = self.__dict__.copy() del state["timer"] del state["locks"] return state def __setstate__(self, state: dict[str, limits.typing.Any]) -> None: # type: ignore[explicit-any] self.__dict__.update(state) self.timer = None self.locks = defaultdict(asyncio.Lock) asyncio.ensure_future(self.__schedule_expiry()) async def __expire_events(self) -> None: for key in self.events.keys(): async with self.locks[key]: for event in list(self.events[key]): if event.expiry <= time.time() and event in self.events[key]: self.events[key].remove(event) if not self.events.get(key, None): self.events.pop(key, None) self.locks.pop(key, None) for key in list(self.expirations.keys()): if self.expirations[key] <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) self.locks.pop(key, None) async def __schedule_expiry(self) -> None: if not self.timer or self.timer.done(): self.timer = asyncio.create_task(self.__expire_events()) @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return ValueError async def incr( self, key: str, expiry: float, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. :param amount: the number to increment by """ await self.get(key) await self.__schedule_expiry() async with self.locks[key]: self.storage[key] += amount if elastic_expiry or self.storage[key] == amount: self.expirations[key] = time.time() + expiry return self.storage.get(key, amount) async def decr(self, key: str, amount: int = 1) -> int: """ decrements the counter for a given rate limit key. 0 is the minimum allowed value. 
:param amount: the number to increment by """ await self.get(key) await self.__schedule_expiry() async with self.locks[key]: self.storage[key] = max(self.storage[key] - amount, 0) return self.storage.get(key, amount) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ if self.expirations.get(key, 0) <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) self.locks.pop(key, None) return self.storage.get(key, 0) async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.storage.pop(key, None) self.expirations.pop(key, None) self.events.pop(key, None) self.locks.pop(key, None) async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ if amount > limit: return False await self.__schedule_expiry() async with self.locks[key]: self.events.setdefault(key, []) timestamp = time.time() try: entry: Entry | None = self.events[key][limit - amount] except IndexError: entry = None if entry and entry.atime >= timestamp - expiry: return False else: self.events[key][:0] = [Entry(expiry) for _ in range(amount)] return True async def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ return self.expirations.get(key, time.time()) async def get_num_acquired(self, key: str, expiry: int) -> int: """ returns the number of entries already acquired :param key: rate limit key to acquire an entry in :param expiry: expiry of the entry """ timestamp = time.time() return ( len([k for k in self.events.get(key, []) if k.atime >= timestamp - expiry]) if self.events.get(key) else 0 ) # FIXME: arg limit is not used async def get_moving_window( self, key: str, limit: int, expiry: int ) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() acquired = await self.get_num_acquired(key, expiry) for item in self.events.get(key, [])[::-1]: if item.atime >= timestamp - expiry: return item.atime, acquired return timestamp, acquired async def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: if amount > limit: return False now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) ( previous_count, previous_ttl, current_count, _, ) = await self._get_sliding_window_info(previous_key, current_key, expiry, now) weighted_count = previous_count * previous_ttl / expiry + current_count if floor(weighted_count) + amount > limit: return False else: # Hit, increase the current counter. # If the counter doesn't exist yet, set twice the theorical expiry. current_count = await self.incr(current_key, 2 * expiry, amount=amount) weighted_count = previous_count * previous_ttl / expiry + current_count if floor(weighted_count) > limit: # Another hit won the race condition: revert the incrementation and refuse this hit # Limitation: during high concurrency at the end of the window, # the counter is shifted and cannot be decremented, so less requests than expected are allowed. 
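# Illustrative race (limit=10, amount=1, current_count=9, empty previous
# window): two coroutines both pass the pre-check (floor(9) + 1 <= 10) and
# increment, raising the counter to 11; the slower one then observes
# weighted_count = 11 > 10 and lands here to revert its own increment.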
await self.decr(current_key, amount) # print("Concurrent call, reverting the counter increase") return False return True async def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) return await self._get_sliding_window_info( previous_key, current_key, expiry, now ) async def _get_sliding_window_info( self, previous_key: str, current_key: str, expiry: int, now: float, ) -> tuple[int, float, int, float]: previous_count = await self.get(previous_key) current_count = await self.get(current_key) if previous_count == 0: previous_ttl = float(0) else: previous_ttl = (1 - (((now - expiry) / expiry) % 1)) * expiry current_ttl = (1 - ((now / expiry) % 1)) * expiry + expiry return previous_count, previous_ttl, current_count, current_ttl async def check(self) -> bool: """ check if storage is healthy """ return True async def reset(self) -> int | None: num_items = max(len(self.storage), len(self.events)) self.storage.clear() self.expirations.clear() self.events.clear() self.locks.clear() return num_items limits-4.4.1/limits/aio/storage/mongodb.py000066400000000000000000000455561476517132700205710ustar00rootroot00000000000000from __future__ import annotations import asyncio import datetime import time from deprecated.sphinx import versionadded, versionchanged from limits.aio.storage.base import ( MovingWindowSupport, SlidingWindowCounterSupport, Storage, ) from limits.typing import ( ParamSpec, TypeVar, cast, ) from limits.util import get_dependency P = ParamSpec("P") R = TypeVar("R") @versionadded(version="2.1") @versionchanged( version="3.14.0", reason="Added option to select custom collection names for windows & counters", ) class MongoDBStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport): """ Rate limit storage with MongoDB as backend. Depends on :pypi:`motor` """ STORAGE_SCHEME = ["async+mongodb", "async+mongodb+srv"] """ The storage scheme for MongoDB for use in an async context """ DEPENDENCIES = ["motor.motor_asyncio", "pymongo"] def __init__( self, uri: str, database_name: str = "limits", counter_collection_name: str = "counters", window_collection_name: str = "windows", wrap_exceptions: bool = False, **options: float | str | bool, ) -> None: """ :param uri: uri of the form ``async+mongodb://[user:password]@host:port?...``, This uri is passed directly to :class:`~motor.motor_asyncio.AsyncIOMotorClient` :param database_name: The database to use for storing the rate limit collections. :param counter_collection_name: The collection name to use for individual counters used in fixed window strategies :param window_collection_name: The collection name to use for sliding & moving window storage :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed to the constructor of :class:`~motor.motor_asyncio.AsyncIOMotorClient` :raise ConfigurationError: when the :pypi:`motor` or :pypi:`pymongo` are not available """ uri = uri.replace("async+mongodb", "mongodb", 1) super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self.dependency = self.dependencies["motor.motor_asyncio"] self.proxy_dependency = self.dependencies["pymongo"] self.lib_errors, _ = get_dependency("pymongo.errors") self.storage = self.dependency.module.AsyncIOMotorClient(uri, **options) # TODO: Fix this hack. 
It was noticed when running a benchmark # with FastAPI - however - doesn't appear in unit tests or in an isolated # use. Reference: https://jira.mongodb.org/browse/MOTOR-822 self.storage.get_io_loop = asyncio.get_running_loop self.__database_name = database_name self.__collection_mapping = { "counters": counter_collection_name, "windows": window_collection_name, } self.__indices_created = False @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return self.lib_errors.PyMongoError # type: ignore @property def database(self): # type: ignore return self.storage.get_database(self.__database_name) async def create_indices(self) -> None: if not self.__indices_created: await asyncio.gather( self.database[self.__collection_mapping["counters"]].create_index( "expireAt", expireAfterSeconds=0 ), self.database[self.__collection_mapping["windows"]].create_index( "expireAt", expireAfterSeconds=0 ), ) self.__indices_created = True async def reset(self) -> int | None: """ Delete all rate limit keys in the rate limit collections (counters, windows) """ num_keys = sum( await asyncio.gather( self.database[self.__collection_mapping["counters"]].count_documents( {} ), self.database[self.__collection_mapping["windows"]].count_documents({}), ) ) await asyncio.gather( self.database[self.__collection_mapping["counters"]].drop(), self.database[self.__collection_mapping["windows"]].drop(), ) return cast(int, num_keys) async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ await asyncio.gather( self.database[self.__collection_mapping["counters"]].find_one_and_delete( {"_id": key} ), self.database[self.__collection_mapping["windows"]].find_one_and_delete( {"_id": key} ), ) async def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ counter = await self.database[self.__collection_mapping["counters"]].find_one( {"_id": key} ) return ( (counter["expireAt"] if counter else datetime.datetime.now()) .replace(tzinfo=datetime.timezone.utc) .timestamp() ) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ counter = await self.database[self.__collection_mapping["counters"]].find_one( { "_id": key, "expireAt": {"$gte": datetime.datetime.now(datetime.timezone.utc)}, }, projection=["count"], ) return counter and counter["count"] or 0 async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. 
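Both the expiry-reset and the increment are evaluated server-side within the single ``find_one_and_update`` aggregation pipeline below, so concurrent increments cannot interleave.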
:param amount: the number to increment by """ await self.create_indices() expiration = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( seconds=expiry ) response = await self.database[ self.__collection_mapping["counters"] ].find_one_and_update( {"_id": key}, [ { "$set": { "count": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": amount, "else": {"$add": ["$count", amount]}, } }, "expireAt": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": expiration, "else": (expiration if elastic_expiry else "$expireAt"), } }, } }, ], upsert=True, projection=["count"], return_document=self.proxy_dependency.module.ReturnDocument.AFTER, ) return int(response["count"]) async def check(self) -> bool: """ Check if storage is healthy by calling :meth:`motor.motor_asyncio.AsyncIOMotorClient.server_info` """ try: await self.storage.server_info() return True except: # noqa: E722 return False async def get_moving_window( self, key: str, limit: int, expiry: int ) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param str key: rate limit key :param int expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() if result := ( await self.database[self.__collection_mapping["windows"]] .aggregate( [ {"$match": {"_id": key}}, { "$project": { "entries": { "$filter": { "input": "$entries", "as": "entry", "cond": {"$gte": ["$$entry", timestamp - expiry]}, } } } }, {"$unwind": "$entries"}, { "$group": { "_id": "$_id", "min": {"$min": "$entries"}, "count": {"$sum": 1}, } }, ] ) .to_list(length=1) ): return result[0]["min"], result[0]["count"] return timestamp, 0 async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ await self.create_indices() if amount > limit: return False timestamp = time.time() try: updates: dict[ str, dict[str, datetime.datetime | dict[str, list[float] | int]], ] = { "$push": { "entries": { "$each": [timestamp] * amount, "$position": 0, "$slice": limit, } }, "$set": { "expireAt": ( datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=expiry) ) }, } await self.database[self.__collection_mapping["windows"]].update_one( { "_id": key, f"entries.{limit - amount}": {"$not": {"$gte": timestamp - expiry}}, }, updates, upsert=True, ) return True except self.proxy_dependency.module.errors.DuplicateKeyError: return False async def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: await self.create_indices() expiry_ms = expiry * 1000 result = await self.database[ self.__collection_mapping["windows"] ].find_one_and_update( {"_id": key}, [ { "$set": { "previousCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": {"$ifNull": ["$currentCount", 0]}, "else": {"$ifNull": ["$previousCount", 0]}, } }, } }, { "$set": { "currentCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": 0, "else": {"$ifNull": ["$currentCount", 0]}, } }, "expiresAt": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": { "$cond": { "if": {"$gt": ["$expiresAt", 0]}, "then": {"$add": ["$expiresAt", expiry_ms]}, "else": {"$add": ["$$NOW", 2 * expiry_ms]}, } }, "else": "$expiresAt", } }, } }, { 
"$set": { "curWeightedCount": { "$floor": { "$add": [ { "$multiply": [ "$previousCount", { "$divide": [ { "$max": [ 0, { "$subtract": [ "$expiresAt", { "$add": [ "$$NOW", expiry_ms, ] }, ] }, ] }, expiry_ms, ] }, ] }, "$currentCount", ] } } } }, { "$set": { "currentCount": { "$cond": { "if": { "$lte": [ {"$add": ["$curWeightedCount", amount]}, limit, ] }, "then": {"$add": ["$currentCount", amount]}, "else": "$currentCount", } } } }, { "$set": { "_acquired": { "$lte": [{"$add": ["$curWeightedCount", amount]}, limit] } } }, {"$unset": ["curWeightedCount"]}, ], return_document=self.proxy_dependency.module.ReturnDocument.AFTER, upsert=True, ) return cast(bool, result["_acquired"]) async def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: expiry_ms = expiry * 1000 if result := await self.database[ self.__collection_mapping["windows"] ].find_one_and_update( {"_id": key}, [ { "$set": { "previousCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": {"$ifNull": ["$currentCount", 0]}, "else": {"$ifNull": ["$previousCount", 0]}, } }, "currentCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": 0, "else": {"$ifNull": ["$currentCount", 0]}, } }, "expiresAt": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": {"$add": ["$expiresAt", expiry_ms]}, "else": "$expiresAt", } }, } } ], return_document=self.proxy_dependency.module.ReturnDocument.AFTER, projection=["currentCount", "previousCount", "expiresAt"], ): expires_at = ( (result["expiresAt"].replace(tzinfo=datetime.timezone.utc).timestamp()) if result.get("expiresAt") else time.time() ) current_ttl = max(0, expires_at - time.time()) prev_ttl = max(0, current_ttl - expiry if result["previousCount"] else 0) return ( result["previousCount"], prev_ttl, result["currentCount"], current_ttl, ) return 0, 0.0, 0, 0.0 limits-4.4.1/limits/aio/storage/redis/000077500000000000000000000000001476517132700176615ustar00rootroot00000000000000limits-4.4.1/limits/aio/storage/redis/__init__.py000066400000000000000000000335731476517132700220050ustar00rootroot00000000000000from __future__ import annotations from deprecated.sphinx import versionadded, versionchanged from packaging.version import Version from limits.aio.storage import MovingWindowSupport, SlidingWindowCounterSupport, Storage from limits.aio.storage.redis.bridge import RedisBridge from limits.aio.storage.redis.coredis import CoredisBridge from limits.aio.storage.redis.redispy import RedispyBridge from limits.aio.storage.redis.valkey import ValkeyBridge from limits.typing import Literal @versionadded(version="2.1") @versionchanged( version="4.2", reason=( "Added support for using the asyncio redis client from :pypi:`redis`" " through :paramref:`implementation`" ), ) @versionchanged( version="4.3", reason=( "Added support for using the asyncio redis client from :pypi:`valkey`" " through :paramref:`implementation` or if :paramref:`uri` has the" " ``async+valkey`` schema" ), ) class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport): """ Rate limit storage with redis as backend. 
Depends on :pypi:`coredis` or :pypi:`redis` """ STORAGE_SCHEME = [ "async+redis", "async+rediss", "async+redis+unix", "async+valkey", "async+valkeys", "async+valkey+unix", ] """ The storage schemes for redis to be used in an async context """ DEPENDENCIES = { "redis": Version("5.2.0"), "coredis": Version("3.4.0"), "valkey": Version("6.0"), } MODE: Literal["BASIC", "CLUSTER", "SENTINEL"] = "BASIC" bridge: RedisBridge storage_exceptions: tuple[Exception, ...] target_server: Literal["redis", "valkey"] def __init__( self, uri: str, wrap_exceptions: bool = False, implementation: Literal["redispy", "coredis", "valkey"] = "coredis", **options: float | str | bool, ) -> None: """ :param uri: uri of the form: - ``async+redis://[:password]@host:port`` - ``async+redis://[:password]@host:port/db`` - ``async+rediss://[:password]@host:port`` - ``async+redis+unix:///path/to/sock?db=0`` etc... This uri is passed directly to :meth:`coredis.Redis.from_url` or :meth:`redis.asyncio.client.Redis.from_url` with the initial ``async`` removed, except for the case of ``async+redis+unix`` where it is replaced with ``unix``. If the uri scheme is ``async+valkey`` the implementation used will be from :pypi:`valkey`. :param connection_pool: if provided, the redis client is initialized with the connection pool and any other params passed as :paramref:`options` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param implementation: Whether to use the client implementation from - ``coredis``: :class:`coredis.Redis` - ``redispy``: :class:`redis.asyncio.client.Redis` - ``valkey``: :class:`valkey.asyncio.client.Valkey` :param options: all remaining keyword arguments are passed directly to the constructor of :class:`coredis.Redis` or :class:`redis.asyncio.client.Redis` :raise ConfigurationError: when the redis library is not available """ uri = uri.removeprefix("async+") self.target_server = "redis" if uri.startswith("redis") else "valkey" uri = uri.replace(f"{self.target_server}+unix", "unix") super().__init__(uri, wrap_exceptions=wrap_exceptions) self.options = options if self.target_server == "valkey" or implementation == "valkey": self.bridge = ValkeyBridge(uri, self.dependencies["valkey"].module) else: if implementation == "redispy": self.bridge = RedispyBridge(uri, self.dependencies["redis"].module) else: self.bridge = CoredisBridge(uri, self.dependencies["coredis"].module) self.configure_bridge() self.bridge.register_scripts() def _current_window_key(self, key: str) -> str: """ Return the current window's storage key (Sliding window strategy) Contrary to other strategies that have one key per rate limit item, this strategy has two keys per rate limit item than must be on the same machine. To keep the current key and the previous key on the same Redis cluster node, curly braces are added. Eg: "{constructed_key}" """ return f"{{{key}}}" def _previous_window_key(self, key: str) -> str: """ Return the previous window's storage key (Sliding window strategy). Curvy braces are added on the common pattern with the current window's key, so the current and the previous key are stored on the same Redis cluster node. 
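Redis Cluster hashes only the portion inside ``{...}`` when assigning a slot, which is what pins both window keys together.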
Eg: "{constructed_key}/-1" """ return f"{self._current_window_key(key)}/-1" def configure_bridge(self) -> None: self.bridge.use_basic(**self.options) @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return self.bridge.base_exceptions async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ return await self.bridge.incr(key, expiry, elastic_expiry, amount) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ return await self.bridge.get(key) async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ return await self.bridge.clear(key) async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ return await self.bridge.acquire_entry(key, limit, expiry, amount) async def get_moving_window( self, key: str, limit: int, expiry: int ) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (previous count, previous TTL, current count, current TTL) """ return await self.bridge.get_moving_window(key, limit, expiry) async def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: current_key = self._current_window_key(key) previous_key = self._previous_window_key(key) return await self.bridge.acquire_sliding_window_entry( previous_key, current_key, limit, expiry, amount ) async def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: previous_key = self._previous_window_key(key) current_key = self._current_window_key(key) return await self.bridge.get_sliding_window(previous_key, current_key, expiry) async def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ return await self.bridge.get_expiry(key) async def check(self) -> bool: """ Check if storage is healthy by calling ``PING`` """ return await self.bridge.check() async def reset(self) -> int | None: """ This function calls a Lua Script to delete keys prefixed with ``self.PREFIX`` in blocks of 5000. .. warning:: This operation was designed to be fast, but was not tested on a large production based system. Be careful with its usage as it could be slow on very large data sets. 
""" return await self.bridge.lua_reset() @versionadded(version="2.1") @versionchanged( version="4.2", reason="Added support for using the asyncio redis client from :pypi:`redis` ", ) @versionchanged( version="4.3", reason=( "Added support for using the asyncio redis client from :pypi:`valkey`" " through :paramref:`implementation` or if :paramref:`uri` has the" " ``async+valkey+cluster`` schema" ), ) class RedisClusterStorage(RedisStorage): """ Rate limit storage with redis cluster as backend Depends on :pypi:`coredis` or :pypi:`redis` """ STORAGE_SCHEME = ["async+redis+cluster", "async+valkey+cluster"] """ The storage schemes for redis cluster to be used in an async context """ MODE = "CLUSTER" def __init__( self, uri: str, wrap_exceptions: bool = False, implementation: Literal["redispy", "coredis", "valkey"] = "coredis", **options: float | str | bool, ) -> None: """ :param uri: url of the form ``async+redis+cluster://[:password]@host:port,host:port`` If the uri scheme is ``async+valkey+cluster`` the implementation used will be from :pypi:`valkey`. :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param implementation: Whether to use the client implementation from - ``coredis``: :class:`coredis.RedisCluster` - ``redispy``: :class:`redis.asyncio.cluster.RedisCluster` - ``valkey``: :class:`valkey.asyncio.cluster.ValkeyCluster` :param options: all remaining keyword arguments are passed directly to the constructor of :class:`coredis.RedisCluster` or :class:`redis.asyncio.RedisCluster` :raise ConfigurationError: when the redis library is not available or if the redis host cannot be pinged. """ super().__init__( uri, wrap_exceptions=wrap_exceptions, implementation=implementation, **options, ) def configure_bridge(self) -> None: self.bridge.use_cluster(**self.options) async def reset(self) -> int | None: """ Redis Clusters are sharded and deleting across shards can't be done atomically. Because of this, this reset loops over all keys that are prefixed with ``self.PREFIX`` and calls delete on them, one at a time. .. warning:: This operation was not tested with extremely large data sets. 
On a large production based system, care should be taken with its usage as it could be slow on very large data sets """ return await self.bridge.reset() @versionadded(version="2.1") @versionchanged( version="4.2", reason="Added support for using the asyncio redis client from :pypi:`redis` ", ) @versionchanged( version="4.3", reason=( "Added support for using the asyncio redis client from :pypi:`valkey`" " through :paramref:`implementation` or if :paramref:`uri` has the" " ``async+valkey+sentinel`` schema" ), ) class RedisSentinelStorage(RedisStorage): """ Rate limit storage with redis sentinel as backend Depends on :pypi:`coredis` or :pypi:`redis` """ STORAGE_SCHEME = [ "async+redis+sentinel", "async+valkey+sentinel", ] """The storage scheme for redis accessed via a redis sentinel installation""" MODE = "SENTINEL" DEPENDENCIES = { "redis": Version("5.2.0"), "coredis": Version("3.4.0"), "coredis.sentinel": Version("3.4.0"), "valkey": Version("6.0"), } def __init__( self, uri: str, wrap_exceptions: bool = False, implementation: Literal["redispy", "coredis", "valkey"] = "coredis", service_name: str | None = None, use_replicas: bool = True, sentinel_kwargs: dict[str, float | str | bool] | None = None, **options: float | str | bool, ): """ :param uri: url of the form ``async+redis+sentinel://host:port,host:port/service_name`` If the uri schema is ``async+valkey+sentinel`` the implementation used will be from :pypi:`valkey`. :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param implementation: Whether to use the client implementation from - ``coredis``: :class:`coredis.sentinel.Sentinel` - ``redispy``: :class:`redis.asyncio.sentinel.Sentinel` - ``valkey``: :class:`valkey.asyncio.sentinel.Sentinel` :param service_name: sentinel service name (if not provided in `uri`) :param use_replicas: Whether to use replicas for read only operations :param sentinel_kwargs: optional arguments to pass as `sentinel_kwargs`` to :class:`coredis.sentinel.Sentinel` or :class:`redis.asyncio.Sentinel` :param options: all remaining keyword arguments are passed directly to the constructor of :class:`coredis.sentinel.Sentinel` or :class:`redis.asyncio.sentinel.Sentinel` :raise ConfigurationError: when the redis library is not available or if the redis primary host cannot be pinged. 
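Illustrative usage (assuming a sentinel on the conventional port 26379
monitoring a service named ``mymaster``)::

    storage = RedisSentinelStorage(
        "async+redis+sentinel://localhost:26379/mymaster"
    )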
""" self.service_name = service_name self.use_replicas = use_replicas self.sentinel_kwargs = sentinel_kwargs super().__init__( uri, wrap_exceptions=wrap_exceptions, implementation=implementation, **options, ) def configure_bridge(self) -> None: self.bridge.use_sentinel( self.service_name, self.use_replicas, self.sentinel_kwargs, **self.options ) limits-4.4.1/limits/aio/storage/redis/bridge.py000066400000000000000000000061571476517132700215000ustar00rootroot00000000000000from __future__ import annotations import urllib from abc import ABC, abstractmethod from types import ModuleType from limits.util import get_package_data class RedisBridge(ABC): PREFIX = "LIMITS" RES_DIR = "resources/redis/lua_scripts" SCRIPT_MOVING_WINDOW = get_package_data(f"{RES_DIR}/moving_window.lua") SCRIPT_ACQUIRE_MOVING_WINDOW = get_package_data( f"{RES_DIR}/acquire_moving_window.lua" ) SCRIPT_CLEAR_KEYS = get_package_data(f"{RES_DIR}/clear_keys.lua") SCRIPT_INCR_EXPIRE = get_package_data(f"{RES_DIR}/incr_expire.lua") SCRIPT_SLIDING_WINDOW = get_package_data(f"{RES_DIR}/sliding_window.lua") SCRIPT_ACQUIRE_SLIDING_WINDOW = get_package_data( f"{RES_DIR}/acquire_sliding_window.lua" ) def __init__( self, uri: str, dependency: ModuleType, ) -> None: self.uri = uri self.parsed_uri = urllib.parse.urlparse(self.uri) self.dependency = dependency self.parsed_auth = {} if self.parsed_uri.username: self.parsed_auth["username"] = self.parsed_uri.username if self.parsed_uri.password: self.parsed_auth["password"] = self.parsed_uri.password def prefixed_key(self, key: str) -> str: return f"{self.PREFIX}:{key}" @abstractmethod def register_scripts(self) -> None: ... @abstractmethod def use_sentinel( self, service_name: str | None, use_replicas: bool, sentinel_kwargs: dict[str, str | float | bool] | None, **options: str | float | bool, ) -> None: ... @abstractmethod def use_basic(self, **options: str | float | bool) -> None: ... @abstractmethod def use_cluster(self, **options: str | float | bool) -> None: ... @property @abstractmethod def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: ... @abstractmethod async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1, ) -> int: ... @abstractmethod async def get(self, key: str) -> int: ... @abstractmethod async def clear(self, key: str) -> None: ... @abstractmethod async def get_moving_window( self, key: str, limit: int, expiry: int ) -> tuple[float, int]: ... @abstractmethod async def get_sliding_window( self, previous_key: str, current_key: str, expiry: int ) -> tuple[int, float, int, float]: ... @abstractmethod async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: ... @abstractmethod async def acquire_sliding_window_entry( self, previous_key: str, current_key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: ... @abstractmethod async def get_expiry(self, key: str) -> float: ... @abstractmethod async def check(self) -> bool: ... @abstractmethod async def reset(self) -> int | None: ... @abstractmethod async def lua_reset(self) -> int | None: ... 
limits-4.4.1/limits/aio/storage/redis/coredis.py000066400000000000000000000165061476517132700216730ustar00rootroot00000000000000from __future__ import annotations import time from typing import TYPE_CHECKING, cast from limits.aio.storage.redis.bridge import RedisBridge from limits.errors import ConfigurationError from limits.typing import AsyncCoRedisClient, Callable if TYPE_CHECKING: import coredis class CoredisBridge(RedisBridge): DEFAULT_CLUSTER_OPTIONS: dict[str, float | str | bool] = { "max_connections": 1000, } "Default options passed to :class:`coredis.RedisCluster`" @property def base_exceptions(self) -> type[Exception] | tuple[type[Exception], ...]: return (self.dependency.exceptions.RedisError,) def use_sentinel( self, service_name: str | None, use_replicas: bool, sentinel_kwargs: dict[str, str | float | bool] | None, **options: str | float | bool, ) -> None: sentinel_configuration = [] connection_options = options.copy() sep = self.parsed_uri.netloc.find("@") + 1 for loc in self.parsed_uri.netloc[sep:].split(","): host, port = loc.split(":") sentinel_configuration.append((host, int(port))) service_name = ( self.parsed_uri.path.replace("/", "") if self.parsed_uri.path else service_name ) if service_name is None: raise ConfigurationError("'service_name' not provided") self.sentinel = self.dependency.sentinel.Sentinel( sentinel_configuration, sentinel_kwargs={**self.parsed_auth, **(sentinel_kwargs or {})}, **{**self.parsed_auth, **connection_options}, ) self.storage = self.sentinel.primary_for(service_name) self.storage_replica = self.sentinel.replica_for(service_name) self.connection_getter = lambda readonly: ( self.storage_replica if readonly and use_replicas else self.storage ) def use_basic(self, **options: str | float | bool) -> None: if connection_pool := options.pop("connection_pool", None): self.storage = self.dependency.Redis( connection_pool=connection_pool, **options ) else: self.storage = self.dependency.Redis.from_url(self.uri, **options) self.connection_getter = lambda _: self.storage def use_cluster(self, **options: str | float | bool) -> None: sep = self.parsed_uri.netloc.find("@") + 1 cluster_hosts: list[dict[str, int | str]] = [] cluster_hosts.extend( {"host": host, "port": int(port)} for loc in self.parsed_uri.netloc[sep:].split(",") if loc for host, port in [loc.split(":")] ) self.storage = self.dependency.RedisCluster( startup_nodes=cluster_hosts, **{**self.DEFAULT_CLUSTER_OPTIONS, **self.parsed_auth, **options}, ) self.connection_getter = lambda _: self.storage lua_moving_window: coredis.commands.Script[bytes] lua_acquire_moving_window: coredis.commands.Script[bytes] lua_sliding_window: coredis.commands.Script[bytes] lua_acquire_sliding_window: coredis.commands.Script[bytes] lua_clear_keys: coredis.commands.Script[bytes] lua_incr_expire: coredis.commands.Script[bytes] connection_getter: Callable[[bool], AsyncCoRedisClient] def get_connection(self, readonly: bool = False) -> AsyncCoRedisClient: return self.connection_getter(readonly) def register_scripts(self) -> None: self.lua_moving_window = self.get_connection().register_script( self.SCRIPT_MOVING_WINDOW ) self.lua_acquire_moving_window = self.get_connection().register_script( self.SCRIPT_ACQUIRE_MOVING_WINDOW ) self.lua_clear_keys = self.get_connection().register_script( self.SCRIPT_CLEAR_KEYS ) self.lua_incr_expire = self.get_connection().register_script( self.SCRIPT_INCR_EXPIRE ) self.lua_sliding_window = self.get_connection().register_script( self.SCRIPT_SLIDING_WINDOW ) self.lua_acquire_sliding_window = 
self.get_connection().register_script( self.SCRIPT_ACQUIRE_SLIDING_WINDOW ) async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: key = self.prefixed_key(key) value = await self.get_connection().incrby(key, amount) if elastic_expiry or value == amount: await self.get_connection().expire(key, expiry) return value async def get(self, key: str) -> int: key = self.prefixed_key(key) return int(await self.get_connection(readonly=True).get(key) or 0) async def clear(self, key: str) -> None: key = self.prefixed_key(key) await self.get_connection().delete([key]) async def lua_reset(self) -> int | None: return cast(int, await self.lua_clear_keys.execute([self.prefixed_key("*")])) async def get_moving_window( self, key: str, limit: int, expiry: int ) -> tuple[float, int]: key = self.prefixed_key(key) timestamp = time.time() window = await self.lua_moving_window.execute( [key], [timestamp - expiry, limit] ) if window: return float(window[0]), window[1] # type: ignore return timestamp, 0 async def get_sliding_window( self, previous_key: str, current_key: str, expiry: int ) -> tuple[int, float, int, float]: previous_key = self.prefixed_key(previous_key) current_key = self.prefixed_key(current_key) if window := await self.lua_sliding_window.execute( [previous_key, current_key], [expiry] ): return ( int(window[0] or 0), # type: ignore max(0, float(window[1] or 0)) / 1000, # type: ignore int(window[2] or 0), # type: ignore max(0, float(window[3] or 0)) / 1000, # type: ignore ) return 0, 0.0, 0, 0.0 async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: key = self.prefixed_key(key) timestamp = time.time() acquired = await self.lua_acquire_moving_window.execute( [key], [timestamp, limit, expiry, amount] ) return bool(acquired) async def acquire_sliding_window_entry( self, previous_key: str, current_key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: previous_key = self.prefixed_key(previous_key) current_key = self.prefixed_key(current_key) acquired = await self.lua_acquire_sliding_window.execute( [previous_key, current_key], [limit, expiry, amount] ) return bool(acquired) async def get_expiry(self, key: str) -> float: key = self.prefixed_key(key) return max(await self.get_connection().ttl(key), 0) + time.time() async def check(self) -> bool: try: await self.get_connection().ping() return True except: # noqa return False async def reset(self) -> int | None: prefix = self.prefixed_key("*") keys = await self.storage.keys(prefix) count = 0 for key in keys: count += await self.storage.delete([key]) return count limits-4.4.1/limits/aio/storage/redis/redispy.py000066400000000000000000000205431476517132700217160ustar00rootroot00000000000000from __future__ import annotations import time from typing import TYPE_CHECKING, cast from limits.aio.storage.redis.bridge import RedisBridge from limits.errors import ConfigurationError from limits.typing import AsyncRedisClient, Callable if TYPE_CHECKING: import redis.commands class RedispyBridge(RedisBridge): DEFAULT_CLUSTER_OPTIONS: dict[str, float | str | bool] = { "max_connections": 1000, } "Default options passed to :class:`redis.asyncio.RedisCluster`" @property def base_exceptions(self) -> type[Exception] | tuple[type[Exception], ...]: return (self.dependency.RedisError,) def use_sentinel( self, service_name: str | None, use_replicas: bool, sentinel_kwargs: dict[str, str | float | bool] | None, **options: str | float | bool, ) -> None: sentinel_configuration = [] 
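        # The sentinel netloc is credentials (if any) followed by a comma
        # separated list of host:port pairs; the path component, when present,
        # names the monitored service, e.g. (hypothetical URI)
        # async+redis+sentinel://:secret@sentinel-1:26379,sentinel-2:26379/myservice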
connection_options = options.copy() sep = self.parsed_uri.netloc.find("@") + 1 for loc in self.parsed_uri.netloc[sep:].split(","): host, port = loc.split(":") sentinel_configuration.append((host, int(port))) service_name = ( self.parsed_uri.path.replace("/", "") if self.parsed_uri.path else service_name ) if service_name is None: raise ConfigurationError("'service_name' not provided") self.sentinel = self.dependency.asyncio.Sentinel( sentinel_configuration, sentinel_kwargs={**self.parsed_auth, **(sentinel_kwargs or {})}, **{**self.parsed_auth, **connection_options}, ) self.storage = self.sentinel.master_for(service_name) self.storage_replica = self.sentinel.slave_for(service_name) self.connection_getter = lambda readonly: ( self.storage_replica if readonly and use_replicas else self.storage ) def use_basic(self, **options: str | float | bool) -> None: if connection_pool := options.pop("connection_pool", None): self.storage = self.dependency.asyncio.Redis( connection_pool=connection_pool, **options ) else: self.storage = self.dependency.asyncio.Redis.from_url(self.uri, **options) self.connection_getter = lambda _: self.storage def use_cluster(self, **options: str | float | bool) -> None: sep = self.parsed_uri.netloc.find("@") + 1 cluster_hosts = [] for loc in self.parsed_uri.netloc[sep:].split(","): host, port = loc.split(":") cluster_hosts.append( self.dependency.asyncio.cluster.ClusterNode(host=host, port=int(port)) ) self.storage = self.dependency.asyncio.RedisCluster( startup_nodes=cluster_hosts, **{**self.DEFAULT_CLUSTER_OPTIONS, **self.parsed_auth, **options}, ) self.connection_getter = lambda _: self.storage lua_moving_window: redis.commands.core.Script lua_acquire_moving_window: redis.commands.core.Script lua_sliding_window: redis.commands.core.Script lua_acquire_sliding_window: redis.commands.core.Script lua_clear_keys: redis.commands.core.Script lua_incr_expire: redis.commands.core.Script connection_getter: Callable[[bool], AsyncRedisClient] def get_connection(self, readonly: bool = False) -> AsyncRedisClient: return self.connection_getter(readonly) def register_scripts(self) -> None: # Redis-py uses a slightly different script registration self.lua_moving_window = self.get_connection().register_script( self.SCRIPT_MOVING_WINDOW ) self.lua_acquire_moving_window = self.get_connection().register_script( self.SCRIPT_ACQUIRE_MOVING_WINDOW ) self.lua_clear_keys = self.get_connection().register_script( self.SCRIPT_CLEAR_KEYS ) self.lua_incr_expire = self.get_connection().register_script( self.SCRIPT_INCR_EXPIRE ) self.lua_sliding_window = self.get_connection().register_script( self.SCRIPT_SLIDING_WINDOW ) self.lua_acquire_sliding_window = self.get_connection().register_script( self.SCRIPT_ACQUIRE_SLIDING_WINDOW ) async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1, ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ key = self.prefixed_key(key) if elastic_expiry: value = await self.get_connection().incrby(key, amount) await self.get_connection().expire(key, expiry) return value else: return cast(int, await self.lua_incr_expire([key], [expiry, amount])) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ key = self.prefixed_key(key) return int(await self.get_connection(readonly=True).get(key) or 0) async def clear(self, key: str) -> None: """ :param key: the 
key to clear rate limits for """ key = self.prefixed_key(key) await self.get_connection().delete(key) async def lua_reset(self) -> int | None: return cast(int, await self.lua_clear_keys([self.prefixed_key("*")])) async def get_moving_window( self, key: str, limit: int, expiry: int ) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (previous count, previous TTL, current count, current TTL) """ key = self.prefixed_key(key) timestamp = time.time() window = await self.lua_moving_window([key], [timestamp - expiry, limit]) if window: return float(window[0]), window[1] return timestamp, 0 async def get_sliding_window( self, previous_key: str, current_key: str, expiry: int ) -> tuple[int, float, int, float]: if window := await self.lua_sliding_window( [self.prefixed_key(previous_key), self.prefixed_key(current_key)], [expiry] ): return ( int(window[0] or 0), max(0, float(window[1] or 0)) / 1000, int(window[2] or 0), max(0, float(window[3] or 0)) / 1000, ) return 0, 0.0, 0, 0.0 async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry """ key = self.prefixed_key(key) timestamp = time.time() acquired = await self.lua_acquire_moving_window( [key], [timestamp, limit, expiry, amount] ) return bool(acquired) async def acquire_sliding_window_entry( self, previous_key: str, current_key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: previous_key = self.prefixed_key(previous_key) current_key = self.prefixed_key(current_key) acquired = await self.lua_acquire_sliding_window( [previous_key, current_key], [limit, expiry, amount] ) return bool(acquired) async def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ key = self.prefixed_key(key) return max(await self.get_connection().ttl(key), 0) + time.time() async def check(self) -> bool: """ check if storage is healthy """ try: await self.get_connection().ping() return True except: # noqa return False async def reset(self) -> int | None: prefix = self.prefixed_key("*") keys = await self.storage.keys( prefix, target_nodes=self.dependency.asyncio.cluster.RedisCluster.ALL_NODES ) count = 0 for key in keys: count += await self.storage.delete(key) return count limits-4.4.1/limits/aio/storage/redis/valkey.py000066400000000000000000000003701476517132700215260ustar00rootroot00000000000000from __future__ import annotations from .redispy import RedispyBridge class ValkeyBridge(RedispyBridge): @property def base_exceptions(self) -> type[Exception] | tuple[type[Exception], ...]: return (self.dependency.ValkeyError,) limits-4.4.1/limits/aio/strategies.py000066400000000000000000000250301476517132700176330ustar00rootroot00000000000000""" Asynchronous rate limiting strategies """ from __future__ import annotations import time from abc import ABC, abstractmethod from math import floor, inf from deprecated.sphinx import deprecated, versionadded from ..limits import RateLimitItem from ..storage import StorageTypes from ..typing import cast from ..util import WindowStats from .storage import MovingWindowSupport, Storage from .storage.base import SlidingWindowCounterSupport class RateLimiter(ABC): def __init__(self, storage: StorageTypes): assert isinstance(storage, Storage) self.storage: Storage = storage @abstractmethod async def hit(self, item: 
RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ raise NotImplementedError @abstractmethod async def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Check if the rate limit can be consumed :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The expected cost to be consumed, default 1 """ raise NotImplementedError @abstractmethod async def get_window_stats( self, item: RateLimitItem, *identifiers: str ) -> WindowStats: """ Query the reset time and remaining amount for the limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :return: (reset time, remaining)) """ raise NotImplementedError async def clear(self, item: RateLimitItem, *identifiers: str) -> None: return await self.storage.clear(item.key_for(*identifiers)) class MovingWindowRateLimiter(RateLimiter): """ Reference: :ref:`strategies:moving window` """ def __init__(self, storage: StorageTypes) -> None: if not ( hasattr(storage, "acquire_entry") or hasattr(storage, "get_moving_window") ): raise NotImplementedError( "MovingWindowRateLimiting is not implemented for storage " f"of type {storage.__class__}" ) super().__init__(storage) async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ return await cast(MovingWindowSupport, self.storage).acquire_entry( item.key_for(*identifiers), item.amount, item.get_expiry(), amount=cost ) async def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Check if the rate limit can be consumed :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The expected cost to be consumed, default 1 """ res = await cast(MovingWindowSupport, self.storage).get_moving_window( item.key_for(*identifiers), item.amount, item.get_expiry(), ) amount = res[1] return amount <= item.amount - cost async def get_window_stats( self, item: RateLimitItem, *identifiers: str ) -> WindowStats: """ returns the number of requests remaining within this limit. 
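
        For example, a sketch with a hypothetical identifier::

            reset_at, remaining = await limiter.get_window_stats(
                item, "user-42"
            )
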
:param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :return: (reset time, remaining) """ window_start, window_items = await cast( MovingWindowSupport, self.storage ).get_moving_window(item.key_for(*identifiers), item.amount, item.get_expiry()) reset = window_start + item.get_expiry() return WindowStats(reset, item.amount - window_items) class FixedWindowRateLimiter(RateLimiter): """ Reference: :ref:`strategies:fixed window` """ async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ return ( await self.storage.incr( item.key_for(*identifiers), item.get_expiry(), elastic_expiry=False, amount=cost, ) <= item.amount ) async def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Check if the rate limit can be consumed :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The expected cost to be consumed, default 1 """ return ( await self.storage.get(item.key_for(*identifiers)) < item.amount - cost + 1 ) async def get_window_stats( self, item: RateLimitItem, *identifiers: str ) -> WindowStats: """ Query the reset time and remaining amount for the limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :return: reset time, remaining """ remaining = max( 0, item.amount - await self.storage.get(item.key_for(*identifiers)), ) reset = await self.storage.get_expiry(item.key_for(*identifiers)) return WindowStats(reset, remaining) @versionadded(version="4.1") class SlidingWindowCounterRateLimiter(RateLimiter): """ Reference: :ref:`strategies:sliding window counter` """ def __init__(self, storage: StorageTypes): if not hasattr(storage, "get_sliding_window") or not hasattr( storage, "acquire_sliding_window_entry" ): raise NotImplementedError( "SlidingWindowCounterRateLimiting is not implemented for storage " f"of type {storage.__class__}" ) super().__init__(storage) def _weighted_count( self, item: RateLimitItem, previous_count: int, previous_expires_in: float, current_count: int, ) -> float: """ Return the approximated by weighting the previous window count and adding the current window count. 
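
        For example, with a ``10 per minute`` limit, 6 hits in the previous
        window that still has 30 seconds to live, and 2 hits in the current
        window, the weighted count is ``6 * 30 / 60 + 2 = 5.0``.
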
""" return previous_count * previous_expires_in / item.get_expiry() + current_count async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The cost of this hit, default 1 """ return await cast( SlidingWindowCounterSupport, self.storage ).acquire_sliding_window_entry( item.key_for(*identifiers), item.amount, item.get_expiry(), cost, ) async def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Check if the rate limit can be consumed :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The expected cost to be consumed, default 1 """ previous_count, previous_expires_in, current_count, _ = await cast( SlidingWindowCounterSupport, self.storage ).get_sliding_window(item.key_for(*identifiers), item.get_expiry()) return ( self._weighted_count( item, previous_count, previous_expires_in, current_count ) < item.amount - cost + 1 ) async def get_window_stats( self, item: RateLimitItem, *identifiers: str ) -> WindowStats: """ Query the reset time and remaining amount for the limit. :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :return: (reset time, remaining) """ ( previous_count, previous_expires_in, current_count, current_expires_in, ) = await cast(SlidingWindowCounterSupport, self.storage).get_sliding_window( item.key_for(*identifiers), item.get_expiry() ) remaining = max( 0, item.amount - floor( self._weighted_count( item, previous_count, previous_expires_in, current_count ) ), ) now = time.time() if not (previous_count or current_count): return WindowStats(now, remaining) expiry = item.get_expiry() previous_reset_in, current_reset_in = inf, inf if previous_count: previous_reset_in = previous_expires_in % (expiry / previous_count) if current_count: current_reset_in = current_expires_in % expiry return WindowStats(now + min(previous_reset_in, current_reset_in), remaining) @deprecated(version="4.1") class FixedWindowElasticExpiryRateLimiter(FixedWindowRateLimiter): """ Reference: :ref:`strategies:fixed window with elastic expiry` """ async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: a :class:`limits.limits.RateLimitItem` instance :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ amount = await self.storage.incr( item.key_for(*identifiers), item.get_expiry(), elastic_expiry=True, amount=cost, ) return amount <= item.amount STRATEGIES = { "sliding-window-counter": SlidingWindowCounterRateLimiter, "fixed-window": FixedWindowRateLimiter, "fixed-window-elastic-expiry": FixedWindowElasticExpiryRateLimiter, "moving-window": MovingWindowRateLimiter, } limits-4.4.1/limits/errors.py000066400000000000000000000012261476517132700162260ustar00rootroot00000000000000""" errors and exceptions """ from __future__ import annotations class ConfigurationError(Exception): """ Error raised when a configuration problem is encountered """ class ConcurrentUpdateError(Exception): """ Error raised when an update to limit fails due to concurrent updates """ def __init__(self, key: str, attempts: int) -> None: super().__init__(f"Unable to update {key} after {attempts} retries") class 
StorageError(Exception): """ Error raised when an error is encountered in a storage """ def __init__(self, storage_error: Exception) -> None: self.storage_error = storage_error limits-4.4.1/limits/limits.py000066400000000000000000000114531476517132700162160ustar00rootroot00000000000000""" """ from __future__ import annotations from functools import total_ordering from limits.typing import ClassVar, NamedTuple, cast def safe_string(value: bytes | str | int | float) -> str: """ normalize a byte/str/int or float to a str """ if isinstance(value, bytes): return value.decode() return str(value) class Granularity(NamedTuple): seconds: int name: str TIME_TYPES = dict( day=Granularity(60 * 60 * 24, "day"), month=Granularity(60 * 60 * 24 * 30, "month"), year=Granularity(60 * 60 * 24 * 30 * 12, "year"), hour=Granularity(60 * 60, "hour"), minute=Granularity(60, "minute"), second=Granularity(1, "second"), ) GRANULARITIES: dict[str, type[RateLimitItem]] = {} class RateLimitItemMeta(type): def __new__( cls, name: str, parents: tuple[type, ...], dct: dict[str, Granularity | list[str]], ) -> RateLimitItemMeta: if "__slots__" not in dct: dct["__slots__"] = [] granularity = super().__new__(cls, name, parents, dct) if "GRANULARITY" in dct: GRANULARITIES[dct["GRANULARITY"][1]] = cast( type[RateLimitItem], granularity ) return granularity # pylint: disable=no-member @total_ordering class RateLimitItem(metaclass=RateLimitItemMeta): """ defines a Rate limited resource which contains the characteristic namespace, amount and granularity multiples of the rate limiting window. :param amount: the rate limit amount :param multiples: multiple of the 'per' :attr:`GRANULARITY` (e.g. 'n' per 'm' seconds) :param namespace: category for the specific rate limit """ __slots__ = ["namespace", "amount", "multiples"] GRANULARITY: ClassVar[Granularity] """ A tuple describing the granularity of this limit as (number of seconds, name) """ def __init__( self, amount: int, multiples: int | None = 1, namespace: str = "LIMITER" ): self.namespace = namespace self.amount = int(amount) self.multiples = int(multiples or 1) @classmethod def check_granularity_string(cls, granularity_string: str) -> bool: """ Checks if this instance matches a *granularity_string* of type ``n per hour``, ``n per minute`` etc, by comparing with :attr:`GRANULARITY` """ return granularity_string.lower() in cls.GRANULARITY.name def get_expiry(self) -> int: """ :return: the duration the limit is enforced for in seconds. """ return self.GRANULARITY.seconds * self.multiples def key_for(self, *identifiers: bytes | str | int | float) -> str: """ Constructs a key for the current limit and any additional identifiers provided. :param identifiers: a list of strings to append to the key :return: a string key identifying this resource with each identifier separated with a '/' delimiter. 
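
        For example::

            RateLimitItemPerMinute(10).key_for("client-ip")
            # -> 'LIMITER/client-ip/10/1/minute'
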
""" remainder = "/".join( [safe_string(k) for k in identifiers] + [ safe_string(self.amount), safe_string(self.multiples), self.GRANULARITY.name, ] ) return f"{self.namespace}/{remainder}" def __eq__(self, other: object) -> bool: if isinstance(other, RateLimitItem): return ( self.amount == other.amount and self.GRANULARITY == other.GRANULARITY and self.multiples == other.multiples ) return False def __repr__(self) -> str: return f"{self.amount} per {self.multiples} {self.GRANULARITY.name}" def __lt__(self, other: RateLimitItem) -> bool: return self.GRANULARITY.seconds < other.GRANULARITY.seconds def __hash__(self) -> int: return hash((self.namespace, self.amount, self.multiples, self.GRANULARITY)) class RateLimitItemPerYear(RateLimitItem): """ per year rate limited resource. """ GRANULARITY = TIME_TYPES["year"] """A year""" class RateLimitItemPerMonth(RateLimitItem): """ per month rate limited resource. """ GRANULARITY = TIME_TYPES["month"] """A month""" class RateLimitItemPerDay(RateLimitItem): """ per day rate limited resource. """ GRANULARITY = TIME_TYPES["day"] """A day""" class RateLimitItemPerHour(RateLimitItem): """ per hour rate limited resource. """ GRANULARITY = TIME_TYPES["hour"] """An hour""" class RateLimitItemPerMinute(RateLimitItem): """ per minute rate limited resource. """ GRANULARITY = TIME_TYPES["minute"] """A minute""" class RateLimitItemPerSecond(RateLimitItem): """ per second rate limited resource. """ GRANULARITY = TIME_TYPES["second"] """A second""" limits-4.4.1/limits/py.typed000066400000000000000000000000001476517132700160240ustar00rootroot00000000000000limits-4.4.1/limits/resources/000077500000000000000000000000001476517132700163515ustar00rootroot00000000000000limits-4.4.1/limits/resources/redis/000077500000000000000000000000001476517132700174575ustar00rootroot00000000000000limits-4.4.1/limits/resources/redis/lua_scripts/000077500000000000000000000000001476517132700220075ustar00rootroot00000000000000limits-4.4.1/limits/resources/redis/lua_scripts/acquire_moving_window.lua000066400000000000000000000007431476517132700271150ustar00rootroot00000000000000local timestamp = tonumber(ARGV[1]) local limit = tonumber(ARGV[2]) local expiry = tonumber(ARGV[3]) local amount = tonumber(ARGV[4]) if amount > limit then return false end local entry = redis.call('lindex', KEYS[1], limit - amount) if entry and tonumber(entry) >= timestamp - expiry then return false end for i = 1, amount do redis.call('lpush', KEYS[1], timestamp) end redis.call('ltrim', KEYS[1], 0, limit - 1) redis.call('expire', KEYS[1], expiry) return true limits-4.4.1/limits/resources/redis/lua_scripts/acquire_sliding_window.lua000066400000000000000000000024611476517132700272460ustar00rootroot00000000000000-- Time is in milliseconds in this script: TTL, expiry... 
local limit = tonumber(ARGV[1]) local expiry = tonumber(ARGV[2]) * 1000 local amount = tonumber(ARGV[3]) if amount > limit then return false end local current_ttl = tonumber(redis.call('pttl', KEYS[2])) if current_ttl > 0 and current_ttl < expiry then -- Current window expired, shift it to the previous window redis.call('rename', KEYS[2], KEYS[1]) redis.call('set', KEYS[2], 0, 'PX', current_ttl + expiry) end local previous_count = tonumber(redis.call('get', KEYS[1])) or 0 local previous_ttl = tonumber(redis.call('pttl', KEYS[1])) or 0 local current_count = tonumber(redis.call('get', KEYS[2])) or 0 current_ttl = tonumber(redis.call('pttl', KEYS[2])) or 0 -- If the values don't exist yet, consider the TTL is 0 if previous_ttl <= 0 then previous_ttl = 0 end if current_ttl <= 0 then current_ttl = 0 end local weighted_count = math.floor(previous_count * previous_ttl / expiry) + current_count if (weighted_count + amount) > limit then return false end -- If the current counter exists, increase its value if redis.call('exists', KEYS[2]) == 1 then redis.call('incrby', KEYS[2], amount) else -- Otherwise, set the value with twice the expiry time redis.call('set', KEYS[2], amount, 'PX', expiry * 2) end return true limits-4.4.1/limits/resources/redis/lua_scripts/clear_keys.lua000066400000000000000000000002701476517132700246320ustar00rootroot00000000000000local keys = redis.call('keys', KEYS[1]) local res = 0 for i=1,#keys,5000 do res = res + redis.call( 'del', unpack(keys, i, math.min(i+4999, #keys)) ) end return res limits-4.4.1/limits/resources/redis/lua_scripts/incr_expire.lua000066400000000000000000000003031476517132700250150ustar00rootroot00000000000000local current local amount = tonumber(ARGV[2]) current = redis.call("incrby", KEYS[1], amount) if tonumber(current) == amount then redis.call("expire", KEYS[1], ARGV[1]) end return current limits-4.4.1/limits/resources/redis/lua_scripts/moving_window.lua000066400000000000000000000006561476517132700254070ustar00rootroot00000000000000local items = redis.call('lrange', KEYS[1], 0, tonumber(ARGV[2])) local expiry = tonumber(ARGV[1]) local a = 0 local oldest = nil for idx=1,#items do if tonumber(items[idx]) >= expiry then a = a + 1 local value = tonumber(items[idx]) if oldest == nil or value < oldest then oldest = value end else break end end if oldest then return {tostring(oldest), a} endlimits-4.4.1/limits/resources/redis/lua_scripts/sliding_window.lua000066400000000000000000000013111476517132700255260ustar00rootroot00000000000000local expiry = tonumber(ARGV[1]) * 1000 local previous_count = redis.call('get', KEYS[1]) local previous_ttl = redis.call('pttl', KEYS[1]) local current_count = redis.call('get', KEYS[2]) local current_ttl = redis.call('pttl', KEYS[2]) if current_ttl > 0 and current_ttl < expiry then -- Current window expired, shift it to the previous window redis.call('rename', KEYS[2], KEYS[1]) redis.call('set', KEYS[2], 0, 'PX', current_ttl + expiry) previous_count = redis.call('get', KEYS[1]) previous_ttl = redis.call('pttl', KEYS[1]) current_count = redis.call('get', KEYS[2]) current_ttl = redis.call('pttl', KEYS[2]) end return {previous_count, previous_ttl, current_count, current_ttl} limits-4.4.1/limits/storage/000077500000000000000000000000001476517132700160035ustar00rootroot00000000000000limits-4.4.1/limits/storage/__init__.py000066400000000000000000000052151476517132700201170ustar00rootroot00000000000000""" Implementations of storage backends to be used with :class:`limits.strategies.RateLimiter` strategies """ from __future__ import 
annotations import urllib import limits # noqa from ..errors import ConfigurationError from ..typing import TypeAlias, cast from .base import MovingWindowSupport, SlidingWindowCounterSupport, Storage from .etcd import EtcdStorage from .memcached import MemcachedStorage from .memory import MemoryStorage from .mongodb import MongoDBStorage, MongoDBStorageBase from .redis import RedisStorage from .redis_cluster import RedisClusterStorage from .redis_sentinel import RedisSentinelStorage from .registry import SCHEMES StorageTypes: TypeAlias = "Storage | limits.aio.storage.Storage" def storage_from_string( storage_string: str, **options: float | str | bool ) -> StorageTypes: """ Factory function to get an instance of the storage class based on the uri of the storage. In most cases using it should be sufficient instead of directly instantiating the storage classes. for example:: from limits.storage import storage_from_string memory = storage_from_string("memory://") memcached = storage_from_string("memcached://localhost:11211") redis = storage_from_string("redis://localhost:6379") The same function can be used to construct the :ref:`storage:async storage` variants, for example:: from limits.storage import storage_from_string memory = storage_from_string("async+memory://") memcached = storage_from_string("async+memcached://localhost:11211") redis = storage_from_string("async+redis://localhost:6379") :param storage_string: a string of the form ``scheme://host:port``. More details about supported storage schemes can be found at :ref:`storage:storage scheme` :param options: all remaining keyword arguments are passed to the constructor matched by :paramref:`storage_string`. :raises ConfigurationError: when the :attr:`storage_string` cannot be mapped to a registered :class:`limits.storage.Storage` or :class:`limits.aio.storage.Storage` instance. """ scheme = urllib.parse.urlparse(storage_string).scheme if scheme not in SCHEMES: raise ConfigurationError(f"unknown storage scheme : {storage_string}") return cast(StorageTypes, SCHEMES[scheme](storage_string, **options)) __all__ = [ "EtcdStorage", "MemcachedStorage", "MemoryStorage", "MongoDBStorage", "MongoDBStorageBase", "MovingWindowSupport", "RedisClusterStorage", "RedisSentinelStorage", "RedisStorage", "SlidingWindowCounterSupport", "Storage", "storage_from_string", ] limits-4.4.1/limits/storage/base.py000066400000000000000000000155331476517132700172760ustar00rootroot00000000000000from __future__ import annotations import functools from abc import ABC, abstractmethod from limits import errors from limits.storage.registry import StorageRegistry from limits.typing import ( Any, Callable, P, R, cast, ) from limits.util import LazyDependency def _wrap_errors( fn: Callable[P, R], ) -> Callable[P, R]: @functools.wraps(fn) def inner(*args: P.args, **kwargs: P.kwargs) -> R: instance = cast(Storage, args[0]) try: return fn(*args, **kwargs) except instance.base_exceptions as exc: if instance.wrap_exceptions: raise errors.StorageError(exc) from exc raise return inner class Storage(LazyDependency, metaclass=StorageRegistry): """ Base class to extend when implementing a storage backend. 
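
    For example, a minimal sketch of a custom backend (the class name and
    the ``dict+memory`` scheme are hypothetical, and expiry handling is
    deliberately naive)::

        import time

        class DictStorage(Storage):
            STORAGE_SCHEME = ["dict+memory"]

            def __init__(self, uri=None, wrap_exceptions=False, **options):
                super().__init__(uri, wrap_exceptions=wrap_exceptions)
                self.counters, self.expiries = {}, {}

            @property
            def base_exceptions(self):
                return ValueError

            def incr(self, key, expiry, elastic_expiry=False, amount=1):
                # drop the counter if its window has already lapsed
                if self.expiries.get(key, 0) <= time.time():
                    self.counters.pop(key, None)
                self.counters[key] = self.counters.get(key, 0) + amount
                if elastic_expiry or self.counters[key] == amount:
                    self.expiries[key] = time.time() + expiry
                return self.counters[key]

            def get(self, key):
                return self.counters.get(key, 0)

            def get_expiry(self, key):
                return self.expiries.get(key, time.time())

            def check(self):
                return True

            def reset(self):
                count = len(self.counters)
                self.counters.clear()
                self.expiries.clear()
                return count

            def clear(self, key):
                self.counters.pop(key, None)
                self.expiries.pop(key, None)
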
""" STORAGE_SCHEME: list[str] | None """The storage schemes to register against this implementation""" def __init_subclass__(cls, **kwargs: Any) -> None: # type: ignore[explicit-any] for method in { "incr", "get", "get_expiry", "check", "reset", "clear", }: setattr(cls, method, _wrap_errors(getattr(cls, method))) super().__init_subclass__(**kwargs) def __init__( self, uri: str | None = None, wrap_exceptions: bool = False, **options: float | str | bool, ): """ :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. """ super().__init__() self.wrap_exceptions = wrap_exceptions @property @abstractmethod def base_exceptions(self) -> type[Exception] | tuple[type[Exception], ...]: raise NotImplementedError @abstractmethod def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. :param amount: the number to increment by """ raise NotImplementedError @abstractmethod def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ raise NotImplementedError @abstractmethod def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ raise NotImplementedError @abstractmethod def check(self) -> bool: """ check if storage is healthy """ raise NotImplementedError @abstractmethod def reset(self) -> int | None: """ reset storage to clear limits """ raise NotImplementedError @abstractmethod def clear(self, key: str) -> None: """ resets the rate limit key :param key: the key to clear rate limits for """ raise NotImplementedError class MovingWindowSupport(ABC): """ Abstract base class for storages that support the :ref:`strategies:moving window` strategy """ def __init_subclass__(cls, **kwargs: Any) -> None: # type: ignore[explicit-any] for method in { "acquire_entry", "get_moving_window", }: setattr( cls, method, _wrap_errors(getattr(cls, method)), ) super().__init_subclass__(**kwargs) @abstractmethod def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ raise NotImplementedError @abstractmethod def get_moving_window(self, key: str, limit: int, expiry: int) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ raise NotImplementedError class SlidingWindowCounterSupport(ABC): """ Abstract base class for storages that support the :ref:`strategies:sliding window counter` strategy. 
""" def __init_subclass__(cls, **kwargs: Any) -> None: # type: ignore[explicit-any] for method in {"acquire_sliding_window_entry", "get_sliding_window"}: setattr( cls, method, _wrap_errors(getattr(cls, method)), ) super().__init_subclass__(**kwargs) @abstractmethod def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ Acquire an entry if the weighted count of the current and previous windows is less than or equal to the limit :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ raise NotImplementedError @abstractmethod def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: """ Return the previous and current window information. :param key: the rate limit key :param expiry: the rate limit expiry, needed to compute the key in some implementations :return: a tuple of (int, float, int, float) with the following information: - previous window counter - previous window TTL - current window counter - current window TTL """ raise NotImplementedError class TimestampedSlidingWindow: """Helper class for storage that support the sliding window counter, with timestamp based keys.""" @classmethod def sliding_window_keys(cls, key: str, expiry: int, at: float) -> tuple[str, str]: """ returns the previous and the current window's keys. :param key: the key to get the window's keys from :param expiry: the expiry of the limit item, in seconds :param at: the timestamp to get the keys from. Default to now, ie ``time.time()`` Returns a tuple with the previous and the current key: (previous, current). Example: - key = "mykey" - expiry = 60 - at = 1738576292.6631825 The return value will be the tuple ``("mykey/28976271", "mykey/28976270")``. """ return f"{key}/{int((at - expiry) / expiry)}", f"{key}/{int(at / expiry)}" limits-4.4.1/limits/storage/etcd.py000066400000000000000000000112111476517132700172700ustar00rootroot00000000000000from __future__ import annotations import time import urllib.parse from deprecated.sphinx import deprecated from limits.errors import ConcurrentUpdateError from limits.storage.base import Storage from limits.typing import TYPE_CHECKING if TYPE_CHECKING: import etcd3 @deprecated(version="4.4") class EtcdStorage(Storage): """ Rate limit storage with etcd as backend. Depends on :pypi:`etcd3`. """ STORAGE_SCHEME = ["etcd"] """The storage scheme for etcd""" DEPENDENCIES = ["etcd3"] PREFIX = "limits" MAX_RETRIES = 5 def __init__( self, uri: str, max_retries: int = MAX_RETRIES, wrap_exceptions: bool = False, **options: str, ) -> None: """ :param uri: etcd location of the form ``etcd://host:port``, :param max_retries: Maximum number of attempts to retry in the case of concurrent updates to a rate limit key :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. 
:param options: all remaining keyword arguments are passed directly to the constructor of :class:`etcd3.Etcd3Client` :raise ConfigurationError: when :pypi:`etcd3` is not available """ parsed = urllib.parse.urlparse(uri) self.lib = self.dependencies["etcd3"].module self.storage: etcd3.Etcd3Client = self.lib.client( parsed.hostname, parsed.port, **options ) self.max_retries = max_retries super().__init__(uri, wrap_exceptions=wrap_exceptions) @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return self.lib.Etcd3Exception # type: ignore[no-any-return] def prefixed_key(self, key: str) -> bytes: return f"{self.PREFIX}/{key}".encode() def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: retries = 0 etcd_key = self.prefixed_key(key) while retries < self.max_retries: now = time.time() lease = self.storage.lease(expiry) window_end = now + expiry create_attempt = self.storage.transaction( compare=[self.storage.transactions.create(etcd_key) == "0"], success=[ self.storage.transactions.put( etcd_key, f"{amount}:{window_end}".encode(), lease=lease.id, ) ], failure=[self.storage.transactions.get(etcd_key)], ) if create_attempt[0]: return amount else: cur, meta = create_attempt[1][0][0] cur_value, window_end = cur.split(b":") window_end = float(window_end) if window_end <= now: self.storage.revoke_lease(meta.lease_id) self.storage.delete(etcd_key) else: if elastic_expiry: self.storage.refresh_lease(meta.lease_id) window_end = now + expiry new = int(cur_value) + amount if self.storage.transaction( compare=[self.storage.transactions.value(etcd_key) == cur], success=[ self.storage.transactions.put( etcd_key, f"{new}:{window_end}".encode(), lease=meta.lease_id, ) ], failure=[], )[0]: return new retries += 1 raise ConcurrentUpdateError(key, retries) def get(self, key: str) -> int: value, meta = self.storage.get(self.prefixed_key(key)) if value: amount, expiry = value.split(b":") if float(expiry) > time.time(): return int(amount) return 0 def get_expiry(self, key: str) -> float: value, _ = self.storage.get(self.prefixed_key(key)) if value: return float(value.split(b":")[1]) return time.time() def check(self) -> bool: try: self.storage.status() return True except: # noqa return False def reset(self) -> int | None: return self.storage.delete_prefix(f"{self.PREFIX}/").deleted def clear(self, key: str) -> None: self.storage.delete(self.prefixed_key(key)) limits-4.4.1/limits/storage/memcached.py000066400000000000000000000257211476517132700202720ustar00rootroot00000000000000from __future__ import annotations import inspect import threading import time import urllib.parse from collections.abc import Iterable from math import ceil, floor from types import ModuleType from limits.errors import ConfigurationError from limits.storage.base import ( SlidingWindowCounterSupport, Storage, TimestampedSlidingWindow, ) from limits.typing import ( Any, Callable, MemcachedClientP, P, R, cast, ) from limits.util import get_dependency class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingWindow): """ Rate limit storage with memcached as backend. Depends on :pypi:`pymemcache`. 
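
    For example (assuming a memcached server listening on localhost)::

        from limits.storage import storage_from_string

        storage = storage_from_string("memcached://localhost:11211")
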
""" STORAGE_SCHEME = ["memcached"] """The storage scheme for memcached""" DEPENDENCIES = ["pymemcache"] def __init__( self, uri: str, wrap_exceptions: bool = False, **options: str | Callable[[], MemcachedClientP], ) -> None: """ :param uri: memcached location of the form ``memcached://host:port,host:port``, ``memcached:///var/tmp/path/to/sock`` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`pymemcache.client.base.PooledClient` or :class:`pymemcache.client.hash.HashClient` (if there are more than one hosts specified) :raise ConfigurationError: when :pypi:`pymemcache` is not available """ parsed = urllib.parse.urlparse(uri) self.hosts = [] for loc in parsed.netloc.strip().split(","): if not loc: continue host, port = loc.split(":") self.hosts.append((host, int(port))) else: # filesystem path to UDS if parsed.path and not parsed.netloc and not parsed.port: self.hosts = [parsed.path] # type: ignore self.dependency = self.dependencies["pymemcache"].module self.library = str(options.pop("library", "pymemcache.client")) self.cluster_library = str( options.pop("cluster_library", "pymemcache.client.hash") ) self.client_getter = cast( Callable[[ModuleType, list[tuple[str, int]]], MemcachedClientP], options.pop("client_getter", self.get_client), ) self.options = options if not get_dependency(self.library): raise ConfigurationError( f"memcached prerequisite not available. please install {self.library}" ) # pragma: no cover self.local_storage = threading.local() self.local_storage.storage = None super().__init__(uri, wrap_exceptions=wrap_exceptions) @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return self.dependency.MemcacheError # type: ignore[no-any-return] def get_client( self, module: ModuleType, hosts: list[tuple[str, int]], **kwargs: str ) -> MemcachedClientP: """ returns a memcached client. 
:param module: the memcached module :param hosts: list of memcached hosts """ return cast( MemcachedClientP, ( module.HashClient(hosts, **kwargs) if len(hosts) > 1 else module.PooledClient(*hosts, **kwargs) ), ) def call_memcached_func( self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> R: if "noreply" in kwargs: argspec = inspect.getfullargspec(func) if not ("noreply" in argspec.args or argspec.varkw): kwargs.pop("noreply") return func(*args, **kwargs) @property def storage(self) -> MemcachedClientP: """ lazily creates a memcached client instance using a thread local """ if not (hasattr(self.local_storage, "storage") and self.local_storage.storage): dependency = get_dependency( self.cluster_library if len(self.hosts) > 1 else self.library )[0] if not dependency: raise ConfigurationError(f"Unable to import {self.cluster_library}") self.local_storage.storage = self.client_getter( dependency, self.hosts, **self.options ) return cast(MemcachedClientP, self.local_storage.storage) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ return int(self.storage.get(key, "0")) def get_many(self, keys: Iterable[str]) -> dict[str, Any]: # type:ignore[explicit-any] """ Return multiple counters at once :param keys: the keys to get the counter values for """ return self.storage.get_many(keys) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.storage.delete(key) def incr( self, key: str, expiry: float, elastic_expiry: bool = False, amount: int = 1, set_expiration_key: bool = True, ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. :param amount: the number to increment by :param set_expiration_key: set the expiration key with the expiration time if needed. If set to False, the key will still expire, but memcached cannot provide the expiration time. """ value = self.call_memcached_func(self.storage.incr, key, amount, noreply=False) if value is not None: if elastic_expiry: self.call_memcached_func(self.storage.touch, key, ceil(expiry)) if set_expiration_key: self.call_memcached_func( self.storage.set, self._expiration_key(key), expiry + time.time(), expire=ceil(expiry), noreply=False, ) return value else: if not self.call_memcached_func( self.storage.add, key, amount, ceil(expiry), noreply=False ): value = self.storage.incr(key, amount) or amount if elastic_expiry: self.call_memcached_func(self.storage.touch, key, ceil(expiry)) if set_expiration_key: self.call_memcached_func( self.storage.set, self._expiration_key(key), expiry + time.time(), expire=ceil(expiry), noreply=False, ) return value else: if set_expiration_key: self.call_memcached_func( self.storage.set, self._expiration_key(key), expiry + time.time(), expire=ceil(expiry), noreply=False, ) return amount def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ return float(self.storage.get(self._expiration_key(key)) or time.time()) def _expiration_key(self, key: str) -> str: """ Return the expiration key for the given counter key. Memcached doesn't natively return the expiration time or TTL for a given key, so we implement the expiration time on a separate key. 
""" return key + "/expires" def check(self) -> bool: """ Check if storage is healthy by calling the ``get`` command on the key ``limiter-check`` """ try: self.call_memcached_func(self.storage.get, "limiter-check") return True except: # noqa return False def reset(self) -> int | None: raise NotImplementedError def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: if amount > limit: return False now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) previous_count, previous_ttl, current_count, _ = self._get_sliding_window_info( previous_key, current_key, expiry, now=now ) weighted_count = previous_count * previous_ttl / expiry + current_count if floor(weighted_count) + amount > limit: return False else: # Hit, increase the current counter. # If the counter doesn't exist yet, set twice the theorical expiry. # We don't need the expiration key as it is estimated with the timestamps directly. current_count = self.incr( current_key, 2 * expiry, amount=amount, set_expiration_key=False ) actualised_previous_ttl = min(0, previous_ttl - (time.time() - now)) weighted_count = ( previous_count * actualised_previous_ttl / expiry + current_count ) if floor(weighted_count) > limit: # Another hit won the race condition: revert the incrementation and refuse this hit # Limitation: during high concurrency at the end of the window, # the counter is shifted and cannot be decremented, so less requests than expected are allowed. self.call_memcached_func( self.storage.decr, current_key, amount, noreply=True, ) return False return True def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) return self._get_sliding_window_info(previous_key, current_key, expiry, now) def _get_sliding_window_info( self, previous_key: str, current_key: str, expiry: int, now: float ) -> tuple[int, float, int, float]: result = self.get_many([previous_key, current_key]) previous_count, current_count = ( int(result.get(previous_key, 0)), int(result.get(current_key, 0)), ) if previous_count == 0: previous_ttl = float(0) else: previous_ttl = (1 - (((now - expiry) / expiry) % 1)) * expiry current_ttl = (1 - ((now / expiry) % 1)) * expiry + expiry return previous_count, previous_ttl, current_count, current_ttl limits-4.4.1/limits/storage/memory.py000066400000000000000000000220121476517132700176620ustar00rootroot00000000000000from __future__ import annotations import threading import time from collections import Counter, defaultdict from math import floor import limits.typing from limits.storage.base import ( MovingWindowSupport, SlidingWindowCounterSupport, Storage, TimestampedSlidingWindow, ) class Entry: def __init__(self, expiry: float) -> None: self.atime = time.time() self.expiry = self.atime + expiry class MemoryStorage( Storage, MovingWindowSupport, SlidingWindowCounterSupport, TimestampedSlidingWindow ): """ rate limit storage using :class:`collections.Counter` as an in memory storage for fixed and elastic window strategies, and a simple list to implement moving window strategy. 
""" STORAGE_SCHEME = ["memory"] def __init__(self, uri: str | None = None, wrap_exceptions: bool = False, **_: str): self.storage: limits.typing.Counter[str] = Counter() self.locks: defaultdict[str, threading.RLock] = defaultdict(threading.RLock) self.expirations: dict[str, float] = {} self.events: dict[str, list[Entry]] = {} self.timer: threading.Timer = threading.Timer(0.01, self.__expire_events) self.timer.start() super().__init__(uri, wrap_exceptions=wrap_exceptions, **_) def __getstate__(self) -> dict[str, limits.typing.Any]: # type: ignore[explicit-any] state = self.__dict__.copy() del state["timer"] del state["locks"] return state def __setstate__(self, state: dict[str, limits.typing.Any]) -> None: # type: ignore[explicit-any] self.__dict__.update(state) self.locks = defaultdict(threading.RLock) self.timer = threading.Timer(0.01, self.__expire_events) self.timer.start() def __expire_events(self) -> None: for key in list(self.events.keys()): with self.locks[key]: for event in list(self.events[key]): if event.expiry <= time.time() and event in self.events[key]: self.events[key].remove(event) if not self.events.get(key, None): self.locks.pop(key, None) for key in list(self.expirations.keys()): if self.expirations[key] <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) self.locks.pop(key, None) def __schedule_expiry(self) -> None: if not self.timer.is_alive(): self.timer = threading.Timer(0.01, self.__expire_events) self.timer.start() @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return ValueError def incr( self, key: str, expiry: float, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. 
:param amount: the number to increment by """ self.get(key) self.__schedule_expiry() with self.locks[key]: self.storage[key] += amount if elastic_expiry or self.storage[key] == amount: self.expirations[key] = time.time() + expiry return self.storage.get(key, 0) def decr(self, key: str, amount: int = 1) -> int: """ decrements the counter for a given rate limit key :param key: the key to decrement :param amount: the number to decrement by """ self.get(key) self.__schedule_expiry() with self.locks[key]: self.storage[key] = max(self.storage[key] - amount, 0) return self.storage.get(key, 0) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ if self.expirations.get(key, 0) <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) self.locks.pop(key, None) return self.storage.get(key, 0) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.storage.pop(key, None) self.expirations.pop(key, None) self.events.pop(key, None) self.locks.pop(key, None) def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ if amount > limit: return False self.__schedule_expiry() with self.locks[key]: self.events.setdefault(key, []) timestamp = time.time() try: entry = self.events[key][limit - amount] except IndexError: entry = None if entry and entry.atime >= timestamp - expiry: return False else: self.events[key][:0] = [Entry(expiry) for _ in range(amount)] return True def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ return self.expirations.get(key, time.time()) def get_num_acquired(self, key: str, expiry: int) -> int: """ returns the number of entries already acquired :param key: rate limit key to acquire an entry in :param expiry: expiry of the entry """ timestamp = time.time() return ( len([k for k in self.events.get(key, []) if k.atime >= timestamp - expiry]) if self.events.get(key) else 0 ) def get_moving_window(self, key: str, limit: int, expiry: int) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() acquired = self.get_num_acquired(key, expiry) for item in self.events.get(key, [])[::-1]: if item.atime >= timestamp - expiry: return item.atime, acquired return timestamp, acquired def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: if amount > limit: return False now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) ( previous_count, previous_ttl, current_count, _, ) = self._get_sliding_window_info(previous_key, current_key, expiry, now) weighted_count = previous_count * previous_ttl / expiry + current_count if floor(weighted_count) + amount > limit: return False else: # Hit, increase the current counter. # If the counter doesn't exist yet, set twice the theorical expiry. 
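            # Doubling the expiry keeps this counter alive long enough to act
            # as the "previous" window during the next period; the key itself
            # is timestamp-derived (see sliding_window_keys) so it rotates
            # automatically.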
current_count = self.incr(current_key, 2 * expiry, amount=amount) weighted_count = previous_count * previous_ttl / expiry + current_count if floor(weighted_count) > limit: # Another hit won the race condition: revert the incrementation and refuse this hit # Limitation: during high concurrency at the end of the window, # the counter is shifted and cannot be decremented, so less requests than expected are allowed. self.decr(current_key, amount) return False return True def _get_sliding_window_info( self, previous_key: str, current_key: str, expiry: int, now: float, ) -> tuple[int, float, int, float]: previous_count = self.get(previous_key) current_count = self.get(current_key) if previous_count == 0: previous_ttl = float(0) else: previous_ttl = (1 - (((now - expiry) / expiry) % 1)) * expiry current_ttl = (1 - ((now / expiry) % 1)) * expiry + expiry return previous_count, previous_ttl, current_count, current_ttl def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: now = time.time() previous_key, current_key = self.sliding_window_keys(key, expiry, now) return self._get_sliding_window_info(previous_key, current_key, expiry, now) def check(self) -> bool: """ check if storage is healthy """ return True def reset(self) -> int | None: num_items = max(len(self.storage), len(self.events)) self.storage.clear() self.expirations.clear() self.events.clear() self.locks.clear() return num_items limits-4.4.1/limits/storage/mongodb.py000066400000000000000000000434601476517132700200110ustar00rootroot00000000000000from __future__ import annotations import datetime import time from abc import ABC, abstractmethod from deprecated.sphinx import versionadded, versionchanged from limits.typing import ( MongoClient, MongoCollection, MongoDatabase, cast, ) from ..util import get_dependency from .base import MovingWindowSupport, SlidingWindowCounterSupport, Storage class MongoDBStorageBase( Storage, MovingWindowSupport, SlidingWindowCounterSupport, ABC ): """ Rate limit storage with MongoDB as backend. Depends on :pypi:`pymongo`. """ DEPENDENCIES = ["pymongo"] def __init__( self, uri: str, database_name: str = "limits", counter_collection_name: str = "counters", window_collection_name: str = "windows", wrap_exceptions: bool = False, **options: int | str | bool, ) -> None: """ :param uri: uri of the form ``mongodb://[user:password]@host:port?...``, This uri is passed directly to :class:`~pymongo.mongo_client.MongoClient` :param database_name: The database to use for storing the rate limit collections. :param counter_collection_name: The collection name to use for individual counters used in fixed window strategies :param window_collection_name: The collection name to use for sliding & moving window storage :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. 
:param options: all remaining keyword arguments are passed to the constructor of :class:`~pymongo.mongo_client.MongoClient` :raise ConfigurationError: when the :pypi:`pymongo` library is not available """ super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self._database_name = database_name self._collection_mapping = { "counters": counter_collection_name, "windows": window_collection_name, } self.lib = self.dependencies["pymongo"].module self.lib_errors, _ = get_dependency("pymongo.errors") self._storage_uri = uri self._storage_options = options self._storage: MongoClient | None = None @property def storage(self) -> MongoClient: if self._storage is None: self._storage = self._init_mongo_client( self._storage_uri, **self._storage_options ) self.__initialize_database() return self._storage @property def _database(self) -> MongoDatabase: return self.storage[self._database_name] @property def counters(self) -> MongoCollection: return self._database[self._collection_mapping["counters"]] @property def windows(self) -> MongoCollection: return self._database[self._collection_mapping["windows"]] @abstractmethod def _init_mongo_client( self, uri: str | None, **options: int | str | bool ) -> MongoClient: raise NotImplementedError() @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return self.lib_errors.PyMongoError # type: ignore def __initialize_database(self) -> None: self.counters.create_index("expireAt", expireAfterSeconds=0) self.windows.create_index("expireAt", expireAfterSeconds=0) def reset(self) -> int | None: """ Delete all rate limit keys in the rate limit collections (counters, windows) """ num_keys = self.counters.count_documents({}) + self.windows.count_documents({}) self.counters.drop() self.windows.drop() return int(num_keys) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.counters.find_one_and_delete({"_id": key}) self.windows.find_one_and_delete({"_id": key}) def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ counter = self.counters.find_one({"_id": key}) return ( (counter["expireAt"] if counter else datetime.datetime.now()) .replace(tzinfo=datetime.timezone.utc) .timestamp() ) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ counter = self.counters.find_one( { "_id": key, "expireAt": {"$gte": datetime.datetime.now(datetime.timezone.utc)}, }, projection=["count"], ) return counter and counter["count"] or 0 def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ expiration = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( seconds=expiry ) return int( self.counters.find_one_and_update( {"_id": key}, [ { "$set": { "count": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": amount, "else": {"$add": ["$count", amount]}, } }, "expireAt": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": expiration, "else": ( expiration if elastic_expiry else "$expireAt" ), } }, } }, ], upsert=True, projection=["count"], return_document=self.lib.ReturnDocument.AFTER, )["count"] ) def check(self) -> bool: """ Check if storage is healthy by calling :meth:`pymongo.mongo_client.MongoClient.server_info` """ try: self.storage.server_info() return True 
except: # noqa: E722 return False def get_moving_window(self, key: str, limit: int, expiry: int) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() result = list( self.windows.aggregate( [ {"$match": {"_id": key}}, { "$project": { "entries": { "$filter": { "input": "$entries", "as": "entry", "cond": {"$gte": ["$$entry", timestamp - expiry]}, } } } }, {"$unwind": "$entries"}, { "$group": { "_id": "$_id", "min": {"$min": "$entries"}, "count": {"$sum": 1}, } }, ] ) ) if result: return result[0]["min"], result[0]["count"] return timestamp, 0 def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ if amount > limit: return False timestamp = time.time() try: updates: dict[ str, dict[str, datetime.datetime | dict[str, list[float] | int]], ] = { "$push": { "entries": { "$each": [timestamp] * amount, "$position": 0, "$slice": limit, } }, "$set": { "expireAt": ( datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=expiry) ) }, } self.windows.update_one( { "_id": key, f"entries.{limit - amount}": {"$not": {"$gte": timestamp - expiry}}, }, updates, upsert=True, ) return True except self.lib.errors.DuplicateKeyError: return False def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: expiry_ms = expiry * 1000 if result := self.windows.find_one_and_update( {"_id": key}, [ { "$set": { "previousCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": {"$ifNull": ["$currentCount", 0]}, "else": {"$ifNull": ["$previousCount", 0]}, } }, "currentCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": 0, "else": {"$ifNull": ["$currentCount", 0]}, } }, "expiresAt": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": { "$add": ["$expiresAt", expiry_ms], }, "else": "$expiresAt", } }, } } ], return_document=self.lib.ReturnDocument.AFTER, projection=["currentCount", "previousCount", "expiresAt"], ): expires_at = ( (result["expiresAt"].replace(tzinfo=datetime.timezone.utc).timestamp()) if result.get("expiresAt") else time.time() ) current_ttl = max(0, expires_at - time.time()) prev_ttl = max(0, current_ttl - expiry if result["previousCount"] else 0) return ( result["previousCount"], prev_ttl, result["currentCount"], current_ttl, ) return 0, 0.0, 0, 0.0 def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: expiry_ms = expiry * 1000 result = self.windows.find_one_and_update( {"_id": key}, [ { "$set": { "previousCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": {"$ifNull": ["$currentCount", 0]}, "else": {"$ifNull": ["$previousCount", 0]}, } }, } }, { "$set": { "currentCount": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": 0, "else": {"$ifNull": ["$currentCount", 0]}, } }, "expiresAt": { "$cond": { "if": { "$lte": [ {"$subtract": ["$expiresAt", "$$NOW"]}, expiry_ms, ] }, "then": { "$cond": { "if": {"$gt": ["$expiresAt", 0]}, "then": {"$add": ["$expiresAt", expiry_ms]}, "else": {"$add": ["$$NOW", 
2 * expiry_ms]}, } }, "else": "$expiresAt", } }, } }, { "$set": { "curWeightedCount": { "$floor": { "$add": [ { "$multiply": [ "$previousCount", { "$divide": [ { "$max": [ 0, { "$subtract": [ "$expiresAt", { "$add": [ "$$NOW", expiry_ms, ] }, ] }, ] }, expiry_ms, ] }, ] }, "$currentCount", ] } } } }, { "$set": { "currentCount": { "$cond": { "if": { "$lte": [ {"$add": ["$curWeightedCount", amount]}, limit, ] }, "then": {"$add": ["$currentCount", amount]}, "else": "$currentCount", } } } }, { "$set": { "_acquired": { "$lte": [{"$add": ["$curWeightedCount", amount]}, limit] } } }, {"$unset": ["curWeightedCount"]}, ], return_document=self.lib.ReturnDocument.AFTER, upsert=True, ) return cast(bool, result["_acquired"]) @versionadded(version="2.1") @versionchanged( version="3.14.0", reason="Added option to select custom collection names for windows & counters", ) class MongoDBStorage(MongoDBStorageBase): STORAGE_SCHEME = ["mongodb", "mongodb+srv"] def _init_mongo_client( self, uri: str | None, **options: int | str | bool ) -> MongoClient: return cast(MongoClient, self.lib.MongoClient(uri, **options)) limits-4.4.1/limits/storage/redis.py000066400000000000000000000245651476517132700174770ustar00rootroot00000000000000from __future__ import annotations import time from typing import TYPE_CHECKING, cast from deprecated.sphinx import versionchanged from packaging.version import Version from limits.typing import Literal, RedisClient from ..util import get_package_data from .base import MovingWindowSupport, SlidingWindowCounterSupport, Storage if TYPE_CHECKING: import redis @versionchanged( version="4.3", reason=( "Added support for using the redis client from :pypi:`valkey`" " if :paramref:`uri` has the ``valkey://`` schema" ), ) class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport): """ Rate limit storage with redis as backend. Depends on :pypi:`redis` (or :pypi:`valkey` if :paramref:`uri` starts with ``valkey://``) """ STORAGE_SCHEME = [ "redis", "rediss", "redis+unix", "valkey", "valkeys", "valkey+unix", ] """The storage scheme for redis""" DEPENDENCIES = {"redis": Version("3.0"), "valkey": Version("6.0")} RES_DIR = "resources/redis/lua_scripts" SCRIPT_MOVING_WINDOW = get_package_data(f"{RES_DIR}/moving_window.lua") SCRIPT_ACQUIRE_MOVING_WINDOW = get_package_data( f"{RES_DIR}/acquire_moving_window.lua" ) SCRIPT_CLEAR_KEYS = get_package_data(f"{RES_DIR}/clear_keys.lua") SCRIPT_INCR_EXPIRE = get_package_data(f"{RES_DIR}/incr_expire.lua") SCRIPT_SLIDING_WINDOW = get_package_data(f"{RES_DIR}/sliding_window.lua") SCRIPT_ACQUIRE_SLIDING_WINDOW = get_package_data( f"{RES_DIR}/acquire_sliding_window.lua" ) lua_moving_window: redis.commands.core.Script lua_acquire_moving_window: redis.commands.core.Script lua_sliding_window: redis.commands.core.Script lua_acquire_sliding_window: redis.commands.core.Script PREFIX = "LIMITS" target_server: Literal["redis", "valkey"] def __init__( self, uri: str, connection_pool: redis.connection.ConnectionPool | None = None, wrap_exceptions: bool = False, **options: float | str | bool, ) -> None: """ :param uri: uri of the form ``redis://[:password]@host:port``, ``redis://[:password]@host:port/db``, ``rediss://[:password]@host:port``, ``redis+unix:///path/to/sock`` etc. This uri is passed directly to :func:`redis.from_url` except for the case of ``redis+unix://`` where it is replaced with ``unix://``. If the uri scheme is ``valkey`` the implementation used will be from :pypi:`valkey`. 
        :param connection_pool: if provided, the redis client is initialized with
         the connection pool and any other params passed as :paramref:`options`
        :param wrap_exceptions: Whether to wrap storage exceptions in
         :exc:`limits.errors.StorageError` before raising it.
        :param options: all remaining keyword arguments are passed
         directly to the constructor of :class:`redis.Redis`
        :raise ConfigurationError: when the :pypi:`redis` library is not available
        """
        super().__init__(uri, wrap_exceptions=wrap_exceptions, **options)
        self.target_server = "valkey" if uri.startswith("valkey") else "redis"

        self.dependency = self.dependencies[self.target_server].module

        uri = uri.replace(f"{self.target_server}+unix", "unix")

        if not connection_pool:
            self.storage = self.dependency.from_url(uri, **options)
        else:
            if self.target_server == "redis":
                self.storage = self.dependency.Redis(
                    connection_pool=connection_pool, **options
                )
            else:
                self.storage = self.dependency.Valkey(
                    connection_pool=connection_pool, **options
                )
        self.initialize_storage(uri)

    @property
    def base_exceptions(
        self,
    ) -> type[Exception] | tuple[type[Exception], ...]:  # pragma: no cover
        return (  # type: ignore[no-any-return]
            self.dependency.RedisError
            if self.target_server == "redis"
            else self.dependency.ValkeyError
        )

    def initialize_storage(self, _uri: str) -> None:
        self.lua_moving_window = self.get_connection().register_script(
            self.SCRIPT_MOVING_WINDOW
        )
        self.lua_acquire_moving_window = self.get_connection().register_script(
            self.SCRIPT_ACQUIRE_MOVING_WINDOW
        )
        self.lua_clear_keys = self.get_connection().register_script(
            self.SCRIPT_CLEAR_KEYS
        )
        self.lua_incr_expire = self.get_connection().register_script(
            self.SCRIPT_INCR_EXPIRE
        )
        self.lua_sliding_window = self.get_connection().register_script(
            self.SCRIPT_SLIDING_WINDOW
        )
        self.lua_acquire_sliding_window = self.get_connection().register_script(
            self.SCRIPT_ACQUIRE_SLIDING_WINDOW
        )

    def get_connection(self, readonly: bool = False) -> RedisClient:
        return cast(RedisClient, self.storage)

    def _current_window_key(self, key: str) -> str:
        """
        Return the current window's storage key (Sliding window strategy)

        Contrary to other strategies that have one key per rate limit item,
        this strategy has two keys per rate limit item that must be on the same machine.
        To keep the current key and the previous key on the same Redis cluster node,
        curly braces are added.

        Eg: "{constructed_key}"
        """
        return f"{{{key}}}"

    def _previous_window_key(self, key: str) -> str:
        """
        Return the previous window's storage key (Sliding window strategy).

        Curly braces are added around the pattern shared with the current window's key,
        so the current and the previous key are stored on the same Redis cluster node.
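        Both keys share the same hash tag (the substring inside the curly
        braces), which is what Redis Cluster hashes when assigning a slot.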
Eg: "{constructed_key}/-1" """ return f"{self._current_window_key(key)}/-1" def prefixed_key(self, key: str) -> str: return f"{self.PREFIX}:{key}" def get_moving_window(self, key: str, limit: int, expiry: int) -> tuple[float, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ key = self.prefixed_key(key) timestamp = time.time() if window := self.lua_moving_window([key], [timestamp - expiry, limit]): return float(window[0]), window[1] return timestamp, 0 def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: previous_key = self.prefixed_key(self._previous_window_key(key)) current_key = self.prefixed_key(self._current_window_key(key)) if window := self.lua_sliding_window([previous_key, current_key], [expiry]): return ( int(window[0] or 0), max(0, float(window[1] or 0)) / 1000, int(window[2] or 0), max(0, float(window[3] or 0)) / 1000, ) return 0, 0.0, 0, 0.0 def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1, ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ key = self.prefixed_key(key) if elastic_expiry: value = self.get_connection().incrby(key, amount) self.get_connection().expire(key, expiry) return value else: return int(self.lua_incr_expire([key], [expiry, amount])) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ key = self.prefixed_key(key) return int(self.get_connection(True).get(key) or 0) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ key = self.prefixed_key(key) self.get_connection().delete(key) def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ key = self.prefixed_key(key) timestamp = time.time() acquired = self.lua_acquire_moving_window( [key], [timestamp, limit, expiry, amount] ) return bool(acquired) def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1, ) -> bool: """ Acquire an entry. Shift the current window to the previous window if it expired. :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ previous_key = self.prefixed_key(self._previous_window_key(key)) current_key = self.prefixed_key(self._current_window_key(key)) acquired = self.lua_acquire_sliding_window( [previous_key, current_key], [limit, expiry, amount] ) return bool(acquired) def get_expiry(self, key: str) -> float: """ :param key: the key to get the expiry for """ key = self.prefixed_key(key) return max(self.get_connection(True).ttl(key), 0) + time.time() def check(self) -> bool: """ check if storage is healthy """ try: return self.get_connection().ping() except: # noqa return False def reset(self) -> int | None: """ This function calls a Lua Script to delete keys prefixed with ``self.PREFIX`` in blocks of 5000. .. warning:: This operation was designed to be fast, but was not tested on a large production based system. Be careful with its usage as it could be slow on very large data sets. 
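
        :return: the number of keys deleted (inferred from the integer result
         returned by the ``clear_keys.lua`` script)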
""" prefix = self.prefixed_key("*") return int(self.lua_clear_keys([prefix])) limits-4.4.1/limits/storage/redis_cluster.py000066400000000000000000000105371476517132700212320ustar00rootroot00000000000000from __future__ import annotations import urllib from deprecated.sphinx import versionchanged from packaging.version import Version from limits.storage.redis import RedisStorage @versionchanged( version="3.14.0", reason=""" Dropped support for the :pypi:`redis-py-cluster` library which has been abandoned/deprecated. """, ) @versionchanged( version="2.5.0", reason=""" Cluster support was provided by the :pypi:`redis-py-cluster` library which has been absorbed into the official :pypi:`redis` client. By default the :class:`redis.cluster.RedisCluster` client will be used however if the version of the package is lower than ``4.2.0`` the implementation will fallback to trying to use :class:`rediscluster.RedisCluster`. """, ) @versionchanged( version="4.3", reason=( "Added support for using the redis client from :pypi:`valkey`" " if :paramref:`uri` has the ``valkey+cluster://`` schema" ), ) class RedisClusterStorage(RedisStorage): """ Rate limit storage with redis cluster as backend Depends on :pypi:`redis` (or :pypi:`valkey` if :paramref:`uri` starts with ``valkey+cluster://``). """ STORAGE_SCHEME = ["redis+cluster", "valkey+cluster"] """The storage scheme for redis cluster""" DEFAULT_OPTIONS: dict[str, float | str | bool] = { "max_connections": 1000, } "Default options passed to the :class:`~redis.cluster.RedisCluster`" DEPENDENCIES = { "redis": Version("4.2.0"), "valkey": Version("6.0"), } def __init__( self, uri: str, wrap_exceptions: bool = False, **options: float | str | bool, ) -> None: """ :param uri: url of the form ``redis+cluster://[:password]@host:port,host:port`` If the uri scheme is ``valkey+cluster`` the implementation used will be from :pypi:`valkey`. :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`redis.cluster.RedisCluster` :raise ConfigurationError: when the :pypi:`redis` library is not available or if the redis cluster cannot be reached. """ parsed = urllib.parse.urlparse(uri) parsed_auth: dict[str, float | str | bool] = {} if parsed.username: parsed_auth["username"] = parsed.username if parsed.password: parsed_auth["password"] = parsed.password sep = parsed.netloc.find("@") + 1 cluster_hosts = [] for loc in parsed.netloc[sep:].split(","): host, port = loc.split(":") cluster_hosts.append((host, int(port))) self.storage = None self.target_server = "valkey" if uri.startswith("valkey") else "redis" merged_options = {**self.DEFAULT_OPTIONS, **parsed_auth, **options} self.dependency = self.dependencies[self.target_server].module startup_nodes = [self.dependency.cluster.ClusterNode(*c) for c in cluster_hosts] if self.target_server == "redis": self.storage = self.dependency.cluster.RedisCluster( startup_nodes=startup_nodes, **merged_options ) else: self.storage = self.dependency.cluster.ValkeyCluster( startup_nodes=startup_nodes, **merged_options ) assert self.storage self.initialize_storage(uri) super(RedisStorage, self).__init__(uri, wrap_exceptions, **options) def reset(self) -> int | None: """ Redis Clusters are sharded and deleting across shards can't be done atomically. Because of this, this reset loops over all keys that are prefixed with ``self.PREFIX`` and calls delete on them, one at a time. .. 
warning:: This operation was not tested with extremely large data sets. On a large production based system, care should be taken with its usage as it could be slow on very large data sets""" prefix = self.prefixed_key("*") count = 0 for primary in self.storage.get_primaries(): node = self.storage.get_redis_connection(primary) keys = node.keys(prefix) count += sum([node.delete(k.decode("utf-8")) for k in keys]) return count limits-4.4.1/limits/storage/redis_sentinel.py000066400000000000000000000103471476517132700213710ustar00rootroot00000000000000from __future__ import annotations import urllib.parse from typing import TYPE_CHECKING from deprecated.sphinx import versionchanged from packaging.version import Version from limits.errors import ConfigurationError from limits.storage.redis import RedisStorage from limits.typing import RedisClient if TYPE_CHECKING: pass @versionchanged( version="4.3", reason=( "Added support for using the redis client from :pypi:`valkey`" " if :paramref:`uri` has the ``valkey+sentinel://`` schema" ), ) class RedisSentinelStorage(RedisStorage): """ Rate limit storage with redis sentinel as backend Depends on :pypi:`redis` package (or :pypi:`valkey` if :paramref:`uri` starts with ``valkey+sentinel://``) """ STORAGE_SCHEME = ["redis+sentinel", "valkey+sentinel"] """The storage scheme for redis accessed via a redis sentinel installation""" DEPENDENCIES = { "redis": Version("3.0"), "redis.sentinel": Version("3.0"), "valkey": Version("6.0"), "valkey.sentinel": Version("6.0"), } def __init__( self, uri: str, service_name: str | None = None, use_replicas: bool = True, sentinel_kwargs: dict[str, float | str | bool] | None = None, wrap_exceptions: bool = False, **options: float | str | bool, ) -> None: """ :param uri: url of the form ``redis+sentinel://host:port,host:port/service_name`` If the uri scheme is ``valkey+sentinel`` the implementation used will be from :pypi:`valkey`. :param service_name: sentinel service name (if not provided in :attr:`uri`) :param use_replicas: Whether to use replicas for read only operations :param sentinel_kwargs: kwargs to pass as :attr:`sentinel_kwargs` to :class:`redis.sentinel.Sentinel` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`redis.sentinel.Sentinel` :raise ConfigurationError: when the redis library is not available or if the redis master host cannot be pinged. 
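
        For example (a minimal sketch; the host, port and service name are
        illustrative)::

            RedisSentinelStorage(
                "redis+sentinel://localhost:26379", service_name="mymaster"
            )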
""" super(RedisStorage, self).__init__( uri, wrap_exceptions=wrap_exceptions, **options ) parsed = urllib.parse.urlparse(uri) sentinel_configuration = [] sentinel_options = sentinel_kwargs.copy() if sentinel_kwargs else {} parsed_auth: dict[str, float | str | bool] = {} if parsed.username: parsed_auth["username"] = parsed.username if parsed.password: parsed_auth["password"] = parsed.password sep = parsed.netloc.find("@") + 1 for loc in parsed.netloc[sep:].split(","): host, port = loc.split(":") sentinel_configuration.append((host, int(port))) self.service_name = ( parsed.path.replace("/", "") if parsed.path else service_name ) if self.service_name is None: raise ConfigurationError("'service_name' not provided") self.target_server = "valkey" if uri.startswith("valkey") else "redis" sentinel_dep = self.dependencies[f"{self.target_server}.sentinel"].module self.sentinel = sentinel_dep.Sentinel( sentinel_configuration, sentinel_kwargs={**parsed_auth, **sentinel_options}, **{**parsed_auth, **options}, ) self.storage: RedisClient = self.sentinel.master_for(self.service_name) self.storage_slave: RedisClient = self.sentinel.slave_for(self.service_name) self.use_replicas = use_replicas self.initialize_storage(uri) @property def base_exceptions( self, ) -> type[Exception] | tuple[type[Exception], ...]: # pragma: no cover return ( # type: ignore[no-any-return] self.dependencies["redis"].module.RedisError if self.target_server == "redis" else self.dependencies["valkey"].module.ValkeyError ) def get_connection(self, readonly: bool = False) -> RedisClient: return self.storage_slave if (readonly and self.use_replicas) else self.storage limits-4.4.1/limits/storage/registry.py000066400000000000000000000012121476517132700202210ustar00rootroot00000000000000from __future__ import annotations from abc import ABCMeta SCHEMES: dict[str, StorageRegistry] = {} class StorageRegistry(ABCMeta): def __new__( mcs, name: str, bases: tuple[type, ...], dct: dict[str, str | list[str]] ) -> StorageRegistry: storage_scheme = dct.get("STORAGE_SCHEME", None) cls = super().__new__(mcs, name, bases, dct) if storage_scheme: if isinstance(storage_scheme, str): # noqa schemes = [storage_scheme] else: schemes = storage_scheme for scheme in schemes: SCHEMES[scheme] = cls return cls limits-4.4.1/limits/strategies.py000066400000000000000000000252231476517132700170670ustar00rootroot00000000000000""" Rate limiting strategies """ from __future__ import annotations import time from abc import ABCMeta, abstractmethod from math import floor, inf from deprecated.sphinx import deprecated, versionadded from limits.storage.base import SlidingWindowCounterSupport from .limits import RateLimitItem from .storage import MovingWindowSupport, Storage, StorageTypes from .typing import cast from .util import WindowStats class RateLimiter(metaclass=ABCMeta): def __init__(self, storage: StorageTypes): assert isinstance(storage, Storage) self.storage: Storage = storage @abstractmethod def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The cost of this hit, default 1 """ raise NotImplementedError @abstractmethod def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Check the rate limit without consuming from it. 
        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :param cost: The expected cost to be consumed, default 1
        """
        raise NotImplementedError

    @abstractmethod
    def get_window_stats(self, item: RateLimitItem, *identifiers: str) -> WindowStats:
        """
        Query the reset time and remaining amount for the limit

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :return: (reset time, remaining)
        """
        raise NotImplementedError

    def clear(self, item: RateLimitItem, *identifiers: str) -> None:
        return self.storage.clear(item.key_for(*identifiers))


class MovingWindowRateLimiter(RateLimiter):
    """
    Reference: :ref:`strategies:moving window`
    """

    def __init__(self, storage: StorageTypes):
        if not (
            hasattr(storage, "acquire_entry") or hasattr(storage, "get_moving_window")
        ):
            raise NotImplementedError(
                "MovingWindowRateLimiting is not implemented for storage "
                f"of type {storage.__class__}"
            )
        super().__init__(storage)

    def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool:
        """
        Consume the rate limit

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :param cost: The cost of this hit, default 1
        """
        return cast(MovingWindowSupport, self.storage).acquire_entry(
            item.key_for(*identifiers), item.amount, item.get_expiry(), amount=cost
        )

    def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool:
        """
        Check if the rate limit can be consumed

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :param cost: The expected cost to be consumed, default 1
        """
        return (
            cast(MovingWindowSupport, self.storage).get_moving_window(
                item.key_for(*identifiers),
                item.amount,
                item.get_expiry(),
            )[1]
            <= item.amount - cost
        )

    def get_window_stats(self, item: RateLimitItem, *identifiers: str) -> WindowStats:
        """
        Query the reset time and remaining amount for the limit

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :return: tuple (reset time, remaining)
        """
        window_start, window_items = cast(
            MovingWindowSupport, self.storage
        ).get_moving_window(item.key_for(*identifiers), item.amount, item.get_expiry())
        reset = window_start + item.get_expiry()

        return WindowStats(reset, item.amount - window_items)


class FixedWindowRateLimiter(RateLimiter):
    """
    Reference: :ref:`strategies:fixed window`
    """

    def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool:
        """
        Consume the rate limit

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :param cost: The cost of this hit, default 1
        """
        return (
            self.storage.incr(
                item.key_for(*identifiers),
                item.get_expiry(),
                elastic_expiry=False,
                amount=cost,
            )
            <= item.amount
        )

    def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool:
        """
        Check if the rate limit can be consumed

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :param cost: The expected cost to be consumed, default 1
        """
        return self.storage.get(item.key_for(*identifiers)) < item.amount - cost + 1

    def get_window_stats(self, item: RateLimitItem, *identifiers: str) -> WindowStats:
        """
        Query the reset time and remaining amount for the limit

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :return: (reset time, remaining)
        """
        remaining = max(0, item.amount - self.storage.get(item.key_for(*identifiers)))
        reset = self.storage.get_expiry(item.key_for(*identifiers))

        return WindowStats(reset, remaining)


@versionadded(version="4.1")
class SlidingWindowCounterRateLimiter(RateLimiter):
    """
    Reference: :ref:`strategies:sliding window counter`
    """

    def __init__(self, storage: StorageTypes):
        if not hasattr(storage, "get_sliding_window") or not hasattr(
            storage, "acquire_sliding_window_entry"
        ):
            raise NotImplementedError(
                "SlidingWindowCounterRateLimiting is not implemented for storage "
                f"of type {storage.__class__}"
            )
        super().__init__(storage)

    def _weighted_count(
        self,
        item: RateLimitItem,
        previous_count: int,
        previous_expires_in: float,
        current_count: int,
    ) -> float:
        """
        Return the approximated count, obtained by weighting the previous window
        count and adding the current window count.
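
        For example (illustrative numbers): with a 60 second window, a previous
        window count of 6 that expires in 30 seconds contributes
        ``6 * 30 / 60 == 3.0``, so with a current window count of 2 the
        weighted count is ``5.0``.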
""" return previous_count * previous_expires_in / item.get_expiry() + current_count def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The cost of this hit, default 1 """ return cast( SlidingWindowCounterSupport, self.storage ).acquire_sliding_window_entry( item.key_for(*identifiers), item.amount, item.get_expiry(), cost, ) def test(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Check if the rate limit can be consumed :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The expected cost to be consumed, default 1 """ previous_count, previous_expires_in, current_count, _ = cast( SlidingWindowCounterSupport, self.storage ).get_sliding_window(item.key_for(*identifiers), item.get_expiry()) return ( self._weighted_count( item, previous_count, previous_expires_in, current_count ) < item.amount - cost + 1 ) def get_window_stats(self, item: RateLimitItem, *identifiers: str) -> WindowStats: """ Query the reset time and remaining amount for the limit. :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :return: WindowStats(reset time, remaining) """ previous_count, previous_expires_in, current_count, current_expires_in = cast( SlidingWindowCounterSupport, self.storage ).get_sliding_window(item.key_for(*identifiers), item.get_expiry()) remaining = max( 0, item.amount - floor( self._weighted_count( item, previous_count, previous_expires_in, current_count ) ), ) now = time.time() if not (previous_count or current_count): return WindowStats(now, remaining) expiry = item.get_expiry() previous_reset_in, current_reset_in = inf, inf if previous_count: previous_reset_in = previous_expires_in % (expiry / previous_count) if current_count: current_reset_in = current_expires_in % expiry return WindowStats(now + min(previous_reset_in, current_reset_in), remaining) @deprecated(version="4.1", action="always") class FixedWindowElasticExpiryRateLimiter(FixedWindowRateLimiter): """ Reference: :ref:`strategies:fixed window with elastic expiry` """ def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The cost of this hit, default 1 """ return ( self.storage.incr( item.key_for(*identifiers), item.get_expiry(), elastic_expiry=True, amount=cost, ) <= item.amount ) KnownStrategy = ( type[SlidingWindowCounterRateLimiter] | type[FixedWindowRateLimiter] | type[FixedWindowElasticExpiryRateLimiter] | type[MovingWindowRateLimiter] ) STRATEGIES: dict[str, KnownStrategy] = { "sliding-window-counter": SlidingWindowCounterRateLimiter, "fixed-window": FixedWindowRateLimiter, "fixed-window-elastic-expiry": FixedWindowElasticExpiryRateLimiter, "moving-window": MovingWindowRateLimiter, } limits-4.4.1/limits/typing.py000066400000000000000000000105531476517132700162270ustar00rootroot00000000000000from __future__ import annotations from collections import Counter from collections.abc import Awaitable, Callable, Iterable from typing import ( TYPE_CHECKING, Any, ClassVar, Literal, NamedTuple, ParamSpec, Protocol, TypeAlias, TypeVar, cast, ) Serializable = int | str | float R = 
TypeVar("R") R_co = TypeVar("R_co", covariant=True) P = ParamSpec("P") if TYPE_CHECKING: import coredis import pymongo.collection import pymongo.database import pymongo.mongo_client import redis class ItemP(Protocol): value: bytes flags: int | None cas: int | None class EmcacheClientP(Protocol): async def add( self, key: bytes, value: bytes, *, flags: int = 0, exptime: int = 0, noreply: bool = False, ) -> None: ... async def get(self, key: bytes, return_flags: bool = False) -> ItemP | None: ... async def get_many(self, keys: Iterable[bytes]) -> dict[bytes, ItemP]: ... async def gets(self, key: bytes, return_flags: bool = False) -> ItemP | None: ... async def increment( self, key: bytes, value: int, *, noreply: bool = False ) -> int | None: ... async def decrement( self, key: bytes, value: int, *, noreply: bool = False ) -> int | None: ... async def delete(self, key: bytes, *, noreply: bool = False) -> None: ... async def set( self, key: bytes, value: bytes, *, flags: int = 0, exptime: int = 0, noreply: bool = False, ) -> None: ... async def touch( self, key: bytes, exptime: int, *, noreply: bool = False ) -> None: ... class MemcachedClientP(Protocol): def add( self, key: str, value: Serializable, expire: int | None = 0, noreply: bool | None = None, flags: int | None = None, ) -> bool: ... def get(self, key: str, default: str | None = None) -> bytes: ... def get_many(self, keys: Iterable[str]) -> dict[str, Any]: ... # type:ignore[explicit-any] def incr( self, key: str, value: int, noreply: bool | None = False ) -> int | None: ... def decr( self, key: str, value: int, noreply: bool | None = False, ) -> int | None: ... def delete(self, key: str, noreply: bool | None = None) -> bool | None: ... def set( self, key: str, value: Serializable, expire: int = 0, noreply: bool | None = None, flags: int | None = None, ) -> bool: ... def touch( self, key: str, expire: int | None = 0, noreply: bool | None = None ) -> bool: ... class RedisClientP(Protocol): def incrby(self, key: str, amount: int) -> int: ... def get(self, key: str) -> bytes | None: ... def delete(self, key: str) -> int: ... def ttl(self, key: str) -> int: ... def expire(self, key: str, seconds: int) -> bool: ... def ping(self) -> bool: ... def register_script(self, script: bytes) -> redis.commands.core.Script: ... class AsyncRedisClientP(Protocol): async def incrby(self, key: str, amount: int) -> int: ... async def get(self, key: str) -> bytes | None: ... async def delete(self, key: str) -> int: ... async def ttl(self, key: str) -> int: ... async def expire(self, key: str, seconds: int) -> bool: ... async def ping(self) -> bool: ... def register_script(self, script: bytes) -> redis.commands.core.Script: ... 
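# Illustrative usage (not part of the library API): any object that
# structurally satisfies one of these protocols can be passed where the
# corresponding alias below is expected, e.g. a function annotated as
# ``def healthy(client: RedisClientP) -> bool`` may simply call
# ``client.ping()``.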
RedisClient: TypeAlias = RedisClientP AsyncRedisClient: TypeAlias = AsyncRedisClientP AsyncCoRedisClient: TypeAlias = "coredis.Redis[bytes] | coredis.RedisCluster[bytes]" MongoClient: TypeAlias = "pymongo.mongo_client.MongoClient[dict[str, Any]]" # type:ignore[explicit-any] MongoDatabase: TypeAlias = "pymongo.database.Database[dict[str, Any]]" # type:ignore[explicit-any] MongoCollection: TypeAlias = "pymongo.collection.Collection[dict[str, Any]]" # type:ignore[explicit-any] __all__ = [ "TYPE_CHECKING", "Any", "AsyncRedisClient", "Awaitable", "Callable", "ClassVar", "Counter", "EmcacheClientP", "ItemP", "Literal", "MemcachedClientP", "MongoClient", "MongoCollection", "MongoDatabase", "NamedTuple", "P", "ParamSpec", "Protocol", "R", "R_co", "RedisClient", "Serializable", "TypeAlias", "TypeVar", "cast", ] limits-4.4.1/limits/util.py000066400000000000000000000135621476517132700156750ustar00rootroot00000000000000""" """ from __future__ import annotations import dataclasses import importlib.resources import re import sys from collections import UserDict from types import ModuleType from typing import TYPE_CHECKING from packaging.version import Version from limits.typing import NamedTuple from .errors import ConfigurationError from .limits import GRANULARITIES, RateLimitItem SEPARATORS = re.compile(r"[,;|]{1}") SINGLE_EXPR = re.compile( r""" \s*([0-9]+) \s*(/|\s*per\s*) \s*([0-9]+) *\s*(hour|minute|second|day|month|year)s?\s*""", re.IGNORECASE | re.VERBOSE, ) EXPR = re.compile( rf"^{SINGLE_EXPR.pattern}(:?{SEPARATORS.pattern}{SINGLE_EXPR.pattern})*$", re.IGNORECASE | re.VERBOSE, ) class WindowStats(NamedTuple): """ tuple to describe a rate limited window """ #: Time as seconds since the Epoch when this window will be reset reset_time: float #: Quantity remaining in this window remaining: int @dataclasses.dataclass class Dependency: name: str version_required: Version | None version_found: Version | None module: ModuleType MissingModule = ModuleType("Missing") if TYPE_CHECKING: _UserDict = UserDict[str, Dependency] else: _UserDict = UserDict class DependencyDict(_UserDict): def __getitem__(self, key: str) -> Dependency: dependency = super().__getitem__(key) if dependency.module is MissingModule: message = f"'{dependency.name}' prerequisite not available." if dependency.version_required: message += ( f" A minimum version of {dependency.version_required} is required." if dependency.version_required else "" ) message += ( " See https://limits.readthedocs.io/en/stable/storage.html#supported-versions" " for more details." ) raise ConfigurationError(message) elif dependency.version_required and ( not dependency.version_found or dependency.version_found < dependency.version_required ): raise ConfigurationError( f"The minimum version of {dependency.version_required}" f" for '{dependency.name}' could not be found. Found version: {dependency.version_found}" ) return dependency class LazyDependency: """ Simple utility that provides an :attr:`dependency` to the child class to fetch any dependencies without having to import them explicitly. """ DEPENDENCIES: dict[str, Version | None] | list[str] = [] """ The python modules this class has a dependency on. Used to lazily populate the :attr:`dependencies` """ def __init__(self) -> None: self._dependencies: DependencyDict = DependencyDict() @property def dependencies(self) -> DependencyDict: """ Cached mapping of the modules this storage depends on. This is done so that the module is only imported lazily when the storage is instantiated. 
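
        For example, a subclass declaring ``DEPENDENCIES = {"redis": Version("3.0")}``
        can read ``self.dependencies["redis"].module`` after instantiation
        (illustrative, assuming :pypi:`redis` is installed).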
:meta private: """ if not getattr(self, "_dependencies", None): dependencies = DependencyDict() mapping: dict[str, Version | None] if isinstance(self.DEPENDENCIES, list): mapping = {dependency: None for dependency in self.DEPENDENCIES} else: mapping = self.DEPENDENCIES for name, minimum_version in mapping.items(): dependency, version = get_dependency(name) dependencies[name] = Dependency( name, minimum_version, version, dependency ) self._dependencies = dependencies return self._dependencies def get_dependency(module_path: str) -> tuple[ModuleType, Version | None]: """ safe function to import a module at runtime """ try: if module_path not in sys.modules: __import__(module_path) root = module_path.split(".")[0] version = getattr(sys.modules[root], "__version__", "0.0.0") return sys.modules[module_path], Version(version) except ImportError: # pragma: no cover return MissingModule, None def get_package_data(path: str) -> bytes: return importlib.resources.files("limits").joinpath(path).read_bytes() def parse_many(limit_string: str) -> list[RateLimitItem]: """ parses rate limits in string notation containing multiple rate limits (e.g. ``1/second; 5/minute``) :param limit_string: rate limit string using :ref:`ratelimit-string` :raise ValueError: if the string notation is invalid. """ if not (isinstance(limit_string, str) and EXPR.match(limit_string)): raise ValueError(f"couldn't parse rate limit string '{limit_string}'") limits = [] for limit in SEPARATORS.split(limit_string): match = SINGLE_EXPR.match(limit) if match: amount, _, multiples, granularity_string = match.groups() granularity = granularity_from_string(granularity_string) limits.append( granularity(int(amount), multiples and int(multiples) or None) ) return limits def parse(limit_string: str) -> RateLimitItem: """ parses a single rate limit in string notation (e.g. ``1/second`` or ``1 per second``) :param limit_string: rate limit string using :ref:`ratelimit-string` :raise ValueError: if the string notation is invalid. 
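
    For example (illustrative)::

        parse("10 per minute")  # -> a RateLimitItemPerMinute with amount 10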
""" return list(parse_many(limit_string))[0] def granularity_from_string(granularity_string: str) -> type[RateLimitItem]: """ :param granularity_string: :raise ValueError: """ for granularity in GRANULARITIES.values(): if granularity.check_granularity_string(granularity_string): return granularity raise ValueError(f"no granularity matched for {granularity_string}") limits-4.4.1/limits/version.py000066400000000000000000000000571476517132700164000ustar00rootroot00000000000000""" empty file to be updated by versioneer """ limits-4.4.1/push-release.sh000077500000000000000000000002521476517132700157710ustar00rootroot00000000000000#!/bin/bash cur=$(git rev-parse --abbrev-ref HEAD) git checkout 4.x git push origin 4.x --tags git checkout stable git merge 4.x git push origin stable git checkout $cur limits-4.4.1/pyproject.toml000066400000000000000000000007751476517132700157630ustar00rootroot00000000000000[tool.versioneer] VCS = "git" style = "pep440-pre" versionfile_source = "limits/_version.py" versionfile_build = "limits/_version.py" parentdir_prefix = "limits-" tag_prefix = "" [tool.ruff] line-length = 88 indent-width = 4 exclude = ["_version.py"] [tool.ruff.format] quote-style = "double" indent-style = "space" skip-magic-trailing-comma = false line-ending = "auto" [tool.ruff.lint] typing-modules = ["limits.typing"] [tool.ruff.lint.isort] required-imports = ["from __future__ import annotations"] limits-4.4.1/pytest.ini000066400000000000000000000015071476517132700150720ustar00rootroot00000000000000[pytest] asyncio_mode = auto norecursedirs = build *.egg markers = unit: mark a test as a unit test. integration: mark a test as an integration test. redis: redis tests redis_sentinel: redis sentinel tests redis_cluster: redis cluster tests memory: in memory tests mongodb: mongodb tests memcached: memcached tests etcd: etcd tests valkey: valkey tests valkey_cluster: valkey cluster tests addopts = --verbose --tb=short --capture=no -rfEsxX --cov=limits -m "not benchmark" -K filterwarnings = error module::ResourceWarning module::pytest.PytestUnraisableExceptionWarning ignore::DeprecationWarning:limits\.storage ignore::DeprecationWarning:limits\.aio\.storage ignore::DeprecationWarning:etcd3 ignore::DeprecationWarning:google.protobuf limits-4.4.1/requirements/000077500000000000000000000000001476517132700155615ustar00rootroot00000000000000limits-4.4.1/requirements/ci.txt000066400000000000000000000000131476517132700167070ustar00rootroot00000000000000-r dev.txt limits-4.4.1/requirements/dev.txt000066400000000000000000000001301476517132700170720ustar00rootroot00000000000000-r test.txt -r docs.txt ruff keyring mypy twine types-deprecated types-setuptools wheel limits-4.4.1/requirements/docs.txt000066400000000000000000000003221476517132700172470ustar00rootroot00000000000000-r main.txt furo==2024.8.6 Sphinx>=4,<9 sphinx-copybutton==0.5.2 sphinx-autobuild==2024.10.3 sphinxext-opengraph==0.9.1 sphinx-inline-tabs==2023.4.21 sphinx-paramlinks==0.6.0 sphinxcontrib-programoutput==0.17 limits-4.4.1/requirements/main.txt000066400000000000000000000000641476517132700172460ustar00rootroot00000000000000deprecated>=1.2 packaging>=21,<25 typing_extensions limits-4.4.1/requirements/storage/000077500000000000000000000000001476517132700172255ustar00rootroot00000000000000limits-4.4.1/requirements/storage/async-etcd.txt000066400000000000000000000000061476517132700220140ustar00rootroot00000000000000aetcd 
limits-4.4.1/requirements/storage/async-memcached.txt000066400000000000000000000001431476517132700230050ustar00rootroot00000000000000emcache>=0.6.1;python_version<"3.11" emcache>=1;python_version>="3.11" and python_version<"3.13.0" limits-4.4.1/requirements/storage/async-mongodb.txt000066400000000000000000000000141476517132700225210ustar00rootroot00000000000000motor>=3,<4 limits-4.4.1/requirements/storage/async-redis.txt000066400000000000000000000000221476517132700222010ustar00rootroot00000000000000coredis>=3.4.0,<5 limits-4.4.1/requirements/storage/async-valkey.txt000066400000000000000000000000121476517132700223650ustar00rootroot00000000000000valkey>=6 limits-4.4.1/requirements/storage/etcd.txt000066400000000000000000000000061476517132700207010ustar00rootroot00000000000000etcd3 limits-4.4.1/requirements/storage/memcached.txt000066400000000000000000000000241476517132700216700ustar00rootroot00000000000000pymemcache>3,<5.0.0 limits-4.4.1/requirements/storage/mongodb.txt000066400000000000000000000000171476517132700214110ustar00rootroot00000000000000pymongo>4.1,<5 limits-4.4.1/requirements/storage/redis.txt000066400000000000000000000000371476517132700210740ustar00rootroot00000000000000redis>3,!=4.5.2,!=4.5.3,<6.0.0 limits-4.4.1/requirements/storage/rediscluster.txt000066400000000000000000000000351476517132700224740ustar00rootroot00000000000000redis>=4.2.0,!=4.5.2,!=4.5.3 limits-4.4.1/requirements/storage/valkey.txt000066400000000000000000000000121476517132700212520ustar00rootroot00000000000000valkey>=6 limits-4.4.1/requirements/test.txt000066400000000000000000000010201476517132700172720ustar00rootroot00000000000000-r main.txt -r storage/etcd.txt -r storage/redis.txt -r storage/rediscluster.txt -r storage/mongodb.txt -r storage/memcached.txt -r storage/valkey.txt -r storage/async-etcd.txt -r storage/async-memcached.txt -r storage/async-mongodb.txt -r storage/async-redis.txt -r storage/async-valkey.txt # Pin to < 4 for aetcd compatibility protobuf>3.6.1,<4 # Test related packages coverage hiro>0.1.6 flaky lovely-pytest-docker pytest<9 pytest-asyncio<0.26 pytest-benchmark[histogram] pytest-cov pytest-lazy-fixtures pytest-mock PyYAML limits-4.4.1/scripts/000077500000000000000000000000001476517132700145255ustar00rootroot00000000000000limits-4.4.1/scripts/github_release_notes.sh000077500000000000000000000004521476517132700212570ustar00rootroot00000000000000#!/bin/bash TAG=$(echo $GITHUB_REF | cut -d / -f 3) git format-patch -1 $TAG --stdout | grep -P '^\+' | \ sed '1,4d' | \ grep -v "Release Date" | \ sed -E -e 's/^\+(.*)/\1/' -e 's/^\*(.*)/## \1/' -e 's/^ //' -e 's/\:(.*)\:(.*)/\2/' | \ sed -E -e 's/`(.*) <(https.*)>`_/[\1](\2)/' limits-4.4.1/setup.cfg000066400000000000000000000011541476517132700146600ustar00rootroot00000000000000[flake8] exclude = build/**,doc/**,_version.py,version.py,versioneer.py max_line_length=100 ignore = W503 [mypy] strict = True check_untyped_defs = True disallow_any_generics = True disallow_any_unimported = True disallow_any_decorated = True disallow_any_explicit = True disallow_incomplete_defs = True disallow_untyped_calls = True disallow_untyped_defs = True disallow_untyped_decorators = True show_error_codes = True warn_return_any = True warn_unused_ignores = True [mypy-limits.storage.etcd] ignore_errors = True [mypy-limits.aio.storage.etcd] ignore_errors = True [mypy-limits._version] ignore_errors = True limits-4.4.1/setup.py000077500000000000000000000040011476517132700145460ustar00rootroot00000000000000""" setup.py for limits """ __author__ = "Ali-Akber Saifee" 
__email__ = "ali@indydevs.org" __copyright__ = "Copyright 2023, Ali-Akber Saifee" import itertools import os from setuptools import find_packages, setup import versioneer THIS_DIR = os.path.abspath(os.path.dirname(__file__)) def get_requirements(req_file): requirements = [] for r in open(os.path.join(THIS_DIR, "requirements", req_file)).read().splitlines(): if r.strip(): requirements.append(r.strip()) return requirements EXTRA_REQUIREMENTS = { "redis": get_requirements("storage/redis.txt"), "rediscluster": get_requirements("storage/rediscluster.txt"), "memcached": get_requirements("storage/memcached.txt"), "mongodb": get_requirements("storage/mongodb.txt"), "etcd": get_requirements("storage/etcd.txt"), "valkey": get_requirements("storage/valkey.txt"), "async-redis": get_requirements("storage/async-redis.txt"), "async-memcached": get_requirements("storage/async-memcached.txt"), "async-mongodb": get_requirements("storage/async-mongodb.txt"), "async-etcd": get_requirements("storage/async-etcd.txt"), "async-valkey": get_requirements("storage/async-valkey.txt"), } EXTRA_REQUIREMENTS["all"] = list(itertools.chain(*EXTRA_REQUIREMENTS.values())) setup( name="limits", author=__author__, author_email=__email__, license="MIT", url="https://limits.readthedocs.org", project_urls={ "Source": "https://github.com/alisaifee/limits", }, zip_safe=False, version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), install_requires=get_requirements("main.txt"), classifiers=[k for k in open("CLASSIFIERS").read().split("\n") if k], description="Rate limiting utilities", long_description=open("README.rst").read(), packages=find_packages(exclude=["tests*"]), python_requires=">=3.10", extras_require=EXTRA_REQUIREMENTS, include_package_data=True, package_data={ "limits": ["py.typed"], }, ) limits-4.4.1/tag.sh000077500000000000000000000022251476517132700141510ustar00rootroot00000000000000#!/bin/bash last_tag=$(git tag | sort --version-sort -r | head -n 1) echo current version:$(python setup.py --version), current tag: $last_tag read -p "new version:" new_version last_portion=$(grep -P "^Changelog$" HISTORY.rst -5 | grep -P "^v\d+.\d+") changelog_file=/var/tmp/limiter.newchangelog new_changelog_heading="v${new_version}" new_changelog_heading_sep=$(python -c "print('-'*len('$new_changelog_heading'))") echo $new_changelog_heading > $changelog_file echo $new_changelog_heading_sep >> $changelog_file echo "Release Date: `date +"%Y-%m-%d"`" >> $changelog_file python -c "print(open('HISTORY.rst').read().replace('$last_portion', open('$changelog_file').read() +'\n' + '$last_portion'))" > HISTORY.rst.new cp HISTORY.rst.new HISTORY.rst vim -O HISTORY.rst <(echo \# vim:filetype=git;git log $last_tag..HEAD --format='* %s (%h)%n%b' | sed -E '/^\*/! s/(.*)/ \1/g') if rst2html HISTORY.rst > /dev/null then echo "Tag $new_version" git add HISTORY.rst git commit -m "Update changelog for ${new_version}" git tag -s ${new_version} -m "Tag version ${new_version}" rm HISTORY.rst.new else echo changelog has errors. skipping tag. 
fi; limits-4.4.1/tests/000077500000000000000000000000001476517132700142005ustar00rootroot00000000000000limits-4.4.1/tests/__init__.py000066400000000000000000000000001476517132700162770ustar00rootroot00000000000000limits-4.4.1/tests/aio/000077500000000000000000000000001476517132700147505ustar00rootroot00000000000000limits-4.4.1/tests/aio/__init__.py000066400000000000000000000000001476517132700170470ustar00rootroot00000000000000limits-4.4.1/tests/aio/storage/000077500000000000000000000000001476517132700164145ustar00rootroot00000000000000limits-4.4.1/tests/aio/storage/__init__.py000066400000000000000000000000001476517132700205130ustar00rootroot00000000000000limits-4.4.1/tests/aio/storage/test_memory.py000066400000000000000000000010711476517132700213340ustar00rootroot00000000000000from __future__ import annotations import pickle from limits.aio.storage import MemoryStorage class TestSerialization: async def test_pickle(self): storage = MemoryStorage() assert 1 == await storage.incr("test", 60) assert await storage.acquire_entry("moving_test", 2, 60) dump = pickle.dumps(storage) restored = pickle.loads(dump) assert 2 == await restored.incr("test", 60) assert await restored.acquire_entry("moving_test", 2, 60) assert not await restored.acquire_entry("moving_test", 2, 60) limits-4.4.1/tests/aio/test_storage.py000066400000000000000000000357241476517132700200400ustar00rootroot00000000000000from __future__ import annotations import time import pytest from pytest_lazy_fixtures import lf from limits import RateLimitItemPerMinute, RateLimitItemPerSecond from limits.aio.storage import ( EtcdStorage, MemcachedStorage, MemoryStorage, MongoDBStorage, MovingWindowSupport, RedisClusterStorage, RedisSentinelStorage, RedisStorage, SlidingWindowCounterSupport, Storage, ) from limits.aio.strategies import ( MovingWindowRateLimiter, SlidingWindowCounterRateLimiter, ) from limits.errors import StorageError from limits.storage import storage_from_string from tests.utils import async_fixed_start @pytest.mark.asyncio class TestBaseStorage: async def test_pluggable_storage_fixed_only(self): class MyStorage(Storage): STORAGE_SCHEME = ["async+mystorage+fixed"] @property def base_exceptions(self): return ValueError async def incr(self, key, expiry, elastic_expiry=False): return async def get(self, key): return 0 async def get_expiry(self, key): return time.time() async def reset(self): return async def check(self): return async def clear(self): return storage = storage_from_string("async+mystorage+fixed://") assert isinstance(storage, MyStorage) with pytest.raises(NotImplementedError): MovingWindowRateLimiter(storage) with pytest.raises(NotImplementedError): SlidingWindowCounterRateLimiter(storage) async def test_pluggable_storage_moving_window(self): class MyStorage(Storage): STORAGE_SCHEME = ["async+mystorage+moving"] @property def base_exceptions(self): return ValueError async def incr(self, key, expiry, elastic_expiry=False): return async def get(self, key): return 0 async def get_expiry(self, key): return time.time() async def reset(self): return async def check(self): return async def clear(self): return async def acquire_entry(self, *a, **k): return True async def get_moving_window(self, *a, **k): return (time.time(), 1) storage = storage_from_string("async+mystorage+moving://") assert isinstance(storage, MyStorage) MovingWindowRateLimiter(storage) async def test_pluggable_storage_sliding_window_counter(self): class MyStorage(Storage, SlidingWindowCounterSupport): STORAGE_SCHEME = ["async+mystorage+sliding"] 
@property def base_exceptions(self): return ValueError async def incr(self, key, expiry, elastic_expiry=False): return async def get(self, key): return 0 async def get_expiry(self, key): return time.time() async def reset(self): return async def check(self): return async def clear(self): return async def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: pass async def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: pass storage = storage_from_string("async+mystorage+sliding://") assert isinstance(storage, MyStorage) SlidingWindowCounterRateLimiter(storage) @pytest.mark.asyncio @pytest.mark.parametrize( "uri, args, expected_instance, fixture", [ pytest.param( "async+memory://", {}, MemoryStorage, None, marks=pytest.mark.memory, id="in-memory", ), pytest.param( "async+redis://localhost:7379", {}, RedisStorage, lf("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "async+redis+unix:///tmp/limits.redis.sock", {}, RedisStorage, lf("redis_uds"), marks=pytest.mark.redis, id="redis-uds", ), pytest.param( "async+redis+unix://:password/tmp/limits.redis.sock", {}, RedisStorage, lf("redis_uds"), marks=pytest.mark.redis, id="redis-uds-auth", ), pytest.param( "async+memcached://localhost:22122", {}, MemcachedStorage, lf("memcached"), marks=pytest.mark.memcached, id="memcached", ), pytest.param( "async+memcached://localhost:22122,localhost:22123", {}, MemcachedStorage, lf("memcached_cluster"), marks=pytest.mark.memcached, id="memcached-cluster", ), pytest.param( "async+redis+sentinel://localhost:26379", {"service_name": "mymaster"}, RedisSentinelStorage, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "async+redis+sentinel://localhost:26379/mymaster", {}, RedisSentinelStorage, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-service-name-url", ), pytest.param( "async+redis+sentinel://:sekret@localhost:36379/mymaster", {"password": "sekret"}, RedisSentinelStorage, lf("redis_sentinel_auth"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-auth", ), pytest.param( "async+redis+cluster://localhost:7001/", {}, RedisClusterStorage, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "async+redis+cluster://:sekret@localhost:8400/", {}, RedisClusterStorage, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "async+mongodb://localhost:37017/", {}, MongoDBStorage, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "async+etcd://localhost:2379", {}, EtcdStorage, lf("etcd"), marks=pytest.mark.etcd, id="etcd", ), ], ) class TestConcreteStorages: async def test_storage_string(self, uri, args, expected_instance, fixture): assert isinstance(storage_from_string(uri, **args), expected_instance) @async_fixed_start async def test_expiry_incr(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) await storage.incr(limit.key_for(), limit.get_expiry()) time.sleep(1.1) assert await storage.get(limit.key_for()) == 0 @async_fixed_start async def test_expiry_acquire_entry(self, uri, args, expected_instance, fixture): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip(f"{expected_instance} does not support acquire entry") storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) assert await storage.acquire_entry( limit.key_for(), 
limit.amount, limit.get_expiry() ) time.sleep(1.1) assert await storage.get(limit.key_for()) == 0 @async_fixed_start async def test_expiry_acquire_sliding_window_entry( self, uri, args, expected_instance, fixture ): if not issubclass(expected_instance, SlidingWindowCounterSupport): pytest.skip(f"{expected_instance} does not support acquire entry") storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) assert await storage.acquire_sliding_window_entry( limit.key_for(), limit.amount, limit.get_expiry() ) assert (await storage.get_sliding_window(limit.key_for(), limit.get_expiry()))[ -1 ] == pytest.approx(2, abs=1e2) async def test_incr_custom_amount(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(1) assert 1 == await storage.incr(limit.key_for(), limit.get_expiry(), amount=1) assert 11 == await storage.incr(limit.key_for(), limit.get_expiry(), amount=10) async def test_acquire_entry_custom_amount( self, uri, args, expected_instance, fixture ): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip(f"{expected_instance} does not support acquire entry") storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(10) assert not await storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=11 ) assert await storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=1 ) assert not await storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=10 ) async def test_storage_check(self, uri, args, expected_instance, fixture): assert await storage_from_string(uri, **args).check() async def test_storage_reset(self, uri, args, expected_instance, fixture): if expected_instance == MemcachedStorage: pytest.skip("Reset not supported for memcached") limit1 = RateLimitItemPerMinute(10) # default namespace, LIMITER limit2 = RateLimitItemPerMinute(10, namespace="OTHER") storage = storage_from_string(uri, **args) for i in range(10): await storage.incr(limit1.key_for(str(i)), limit1.get_expiry()) await storage.incr(limit2.key_for(str(i)), limit2.get_expiry()) assert await storage.reset() == 20 async def test_storage_clear(self, uri, args, expected_instance, fixture): limit = RateLimitItemPerMinute(10) storage = storage_from_string(uri, **args) await storage.incr(limit.key_for(), limit.get_expiry()) assert 1 == await storage.get(limit.key_for()) await storage.clear(limit.key_for()) assert 0 == await storage.get(limit.key_for()) @pytest.mark.asyncio @pytest.mark.parametrize("wrap_exceptions", (True, False)) class TestStorageErrors: class MyStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport): STORAGE_SCHEME = ["mystorage"] class MyError(Exception): pass @property def base_exceptions(self): return self.MyError async def incr(self, key, expiry, elastic_expiry=False, amount=1): raise self.MyError() async def get(self, key): raise self.MyError() async def get_expiry(self, key): raise self.MyError() async def reset(self): raise self.MyError() async def check(self): raise self.MyError() async def clear(self, key): raise self.MyError() async def acquire_entry(self, key, limit, expiry, amount=1): raise self.MyError() async def get_moving_window(self, key, limit, expiry): raise self.MyError() async def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: raise self.MyError() async def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: raise 
self.MyError() def assert_exception(self, exc, wrap_exceptions): if wrap_exceptions: assert isinstance(exc, StorageError) assert isinstance(exc.storage_error, self.MyStorage.MyError) else: assert isinstance(exc, self.MyStorage.MyError) async def test_incr_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).incr("", 1) self.assert_exception(exc.value, wrap_exceptions) async def test_get_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).get("") self.assert_exception(exc.value, wrap_exceptions) async def test_get_expiry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).get_expiry("") self.assert_exception(exc.value, wrap_exceptions) async def test_reset_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).reset() self.assert_exception(exc.value, wrap_exceptions) async def test_check_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).check() self.assert_exception(exc.value, wrap_exceptions) async def test_clear_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).clear("") self.assert_exception(exc.value, wrap_exceptions) async def test_acquire_entry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).acquire_entry( "", 1, 1 ) self.assert_exception(exc.value, wrap_exceptions) async def test_get_moving_window_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).get_moving_window( "", 1, 1 ) self.assert_exception(exc.value, wrap_exceptions) async def test_acquire_sliding_window_entry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage( wrap_exceptions=wrap_exceptions ).acquire_sliding_window_entry("", 1, 1) self.assert_exception(exc.value, wrap_exceptions) async def test_get_sliding_window_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).get_sliding_window( "", 1 ) self.assert_exception(exc.value, wrap_exceptions) limits-4.4.1/tests/aio/test_strategy.py000066400000000000000000000424551476517132700202350ustar00rootroot00000000000000from __future__ import annotations import time from math import ceil import pytest from limits.aio.strategies import ( FixedWindowElasticExpiryRateLimiter, FixedWindowRateLimiter, MovingWindowRateLimiter, SlidingWindowCounterRateLimiter, ) from limits.limits import ( RateLimitItemPerHour, RateLimitItemPerMinute, RateLimitItemPerSecond, ) from limits.storage import storage_from_string from limits.storage.base import TimestampedSlidingWindow from tests.utils import ( async_all_storage, async_fixed_start, async_moving_window_storage, async_sliding_window_counter_storage, async_window, timestamp_based_key_ttl, ) @pytest.mark.asyncio @async_all_storage class TestAsyncFixedWindow: @async_fixed_start async def test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) async with async_window(1) as (start, _): assert all([await limiter.hit(limit) for _ in range(0, 10)]) 
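# For orientation, a self-contained sketch of the async fixed-window flow
# these tests exercise, using only the in-memory backend. The entrypoint,
# function name and key below are illustrative, not part of the suite:
import asyncio

from limits.aio.strategies import FixedWindowRateLimiter
from limits.storage import storage_from_string
from limits.util import parse


async def _fixed_window_demo() -> None:
    # "async+memory://" resolves to the async MemoryStorage backend
    storage = storage_from_string("async+memory://")
    limiter = FixedWindowRateLimiter(storage)
    limit = parse("10/second")
    allowed = 0
    for _ in range(12):
        if await limiter.hit(limit, "demo-key"):
            allowed += 1
    stats = await limiter.get_window_stats(limit, "demo-key")
    # only the first 10 hits fit in the window
    assert allowed == 10 and stats.remaining == 0


# asyncio.run(_fixed_window_demo())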
assert not await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 0 assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( start + 2, 1e-2 ) @async_fixed_start async def test_fixed_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert (await limiter.get_window_stats(limit)).remaining == 10 assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( time.time(), 1e-2 ) @async_fixed_start async def test_fixed_window_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(10, 2) assert not await limiter.hit(limit, "k1", cost=11) assert await limiter.hit(limit, "k2", cost=5) assert (await limiter.get_window_stats(limit, "k2")).remaining == 5 assert not await limiter.test(limit, "k2", cost=6) assert not await limiter.hit(limit, "k2", cost=6) @async_fixed_start async def test_fixed_window_with_elastic_expiry(self, uri, args, fixture): storage = storage_from_string(uri, **args) with pytest.warns(DeprecationWarning): limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) async with async_window(1) as (start, end): assert all([await limiter.hit(limit) for _ in range(0, 10)]) assert not await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 0 assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( start + 2, 1e-2 ) async with async_window(3) as (start, end): assert not await limiter.hit(limit) assert await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 9 assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( end + 2, 1e-2 ) @async_fixed_start async def test_fixed_window_with_elastic_expiry_multiple_cost( self, uri, args, fixture ): storage = storage_from_string(uri, **args) with pytest.warns(DeprecationWarning): limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not await limiter.hit(limit, "k1", cost=11) async with async_window(0) as (_, end): assert await limiter.hit(limit, "k2", cost=5) assert (await limiter.get_window_stats(limit, "k2")).remaining == 5 assert ( await limiter.get_window_stats(limit, "k2") ).reset_time == pytest.approx(end + 2, 1e-2) assert not await limiter.hit(limit, "k2", cost=6) @async_fixed_start @pytest.mark.flaky async def test_test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerHour(2, 1) assert await limiter.hit(limit) assert await limiter.test(limit) assert await limiter.hit(limit) assert not await limiter.test(limit) assert not await limiter.hit(limit) @pytest.mark.asyncio @async_moving_window_storage class TestAsyncMovingWindow: async def test_moving_window_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerMinute(2) assert await limiter.hit(limit, "key") time.sleep(1) assert await limiter.hit(limit, "key") time.sleep(1) assert not await limiter.hit(limit, "key") assert (await limiter.get_window_stats(limit, "key")).remaining == 0 assert ( await limiter.get_window_stats(limit, "key") ).reset_time - time.time() == pytest.approx(58, 1e-2) async def test_moving_window(self, uri, args, fixture): storage = 
storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) # 5 hits in the first 500ms async with async_window(0.5): assert all([await limiter.hit(limit) for i in range(5)]) # 5 hits in the last 200ms async with async_window(2, delay=1.3): assert all([await limiter.hit(limit) for i in range(5)]) # 11th fails assert not await limiter.hit(limit) # 5 more succeed since there were only 5 in the last 2 seconds assert all([await limiter.hit(limit) for i in range(5)]) assert (await limiter.get_window_stats(limit)).remaining == 0 async def test_moving_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert (await limiter.get_window_stats(limit)).remaining == 10 assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( time.time() + 2, 1e-2 ) async def test_moving_window_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not await limiter.hit(limit, "k1", cost=11) # 5 hits in the first 100ms async with async_window(0.1): assert await limiter.hit(limit, "k2", cost=5) # 5 hits in the last 100ms async with async_window(2, delay=1.8): assert all([await limiter.hit(limit, "k2") for i in range(4)]) assert not await limiter.test(limit, "k2", cost=2) assert not await limiter.hit(limit, "k2", cost=2) assert await limiter.hit(limit, "k2") assert all([await limiter.hit(limit, "k2") for i in range(5)]) assert (await limiter.get_window_stats(limit, "k2")).remaining == 0 assert not await limiter.hit(limit, "k2", cost=2) async def test_moving_window_varying_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) five_per_min = RateLimitItemPerMinute(5) await limiter.hit(five_per_min, cost=5) assert not await limiter.hit(five_per_min, cost=2) await limiter.clear(five_per_min) assert await limiter.hit(five_per_min) async def test_moving_window_huge_cost_async(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) many_per_min = RateLimitItemPerMinute(1_000_000) await limiter.hit(many_per_min, cost=999_999) assert not await limiter.hit(many_per_min, cost=2) await limiter.clear(many_per_min) assert await limiter.hit(many_per_min) async def test_test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerHour(2, 1) limiter = MovingWindowRateLimiter(storage) assert await limiter.hit(limit) assert await limiter.test(limit) assert await limiter.hit(limit) assert not await limiter.test(limit) assert not await limiter.hit(limit) @pytest.mark.asyncio @async_sliding_window_counter_storage class TestAsyncSlidingWindow: @async_fixed_start async def test_sliding_window_counter(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 1: time.sleep(ttl) async with async_window(1) as (start, _): assert all([await limiter.hit(limit) for _ in range(0, 10)]) assert not await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 0 assert (await 
limiter.get_window_stats(limit)).reset_time == pytest.approx( start + 2, 1e-2 ) @pytest.mark.flaky async def test_sliding_window_counter_total_reset(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) multiple = 10 period = 1 limit = RateLimitItemPerSecond(multiple, period) assert (await limiter.get_window_stats(limit)).remaining == multiple if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.5: time.sleep(ttl) assert await limiter.hit(limit, cost=multiple) assert not await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 0 time.sleep(period * 2) assert (await limiter.get_window_stats(limit)).remaining == multiple assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( time.time(), abs=1e-2 ) async def test_sliding_window_counter_current_window(self, uri, args, fixture): """Check the window stats when only the current window is filled""" storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerHour(2, 24) if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.5: time.sleep(ttl) assert await limiter.hit(limit) now = time.time() if isinstance(storage, TimestampedSlidingWindow): expected_reset_time = now + timestamp_based_key_ttl(limit, now) else: expected_reset_time = now + 24 * 3600 assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( expected_reset_time, 1e-2 ) assert (await limiter.get_window_stats(limit)).remaining == 1 assert await limiter.hit(limit) assert not await limiter.hit(limit) @pytest.mark.flaky(max_runs=3) async def test_sliding_window_counter_previous_window(self, uri, args, fixture): """Check the window stats when the previous window is partially filled""" storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerSecond(5, 1) sleep_margin = 0.001 if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.3: time.sleep(ttl + sleep_margin) t0 = time.time() previous_window_hits = 3 await limiter.hit(limit) t1 = time.time() for i in range(previous_window_hits - 1): await limiter.hit(limit) # Check the stats: only the current window is filled if isinstance(storage, TimestampedSlidingWindow): expected_reset_time = t0 + timestamp_based_key_ttl(limit, t0) else: expected_reset_time = t1 + 1 reset_time = (await limiter.get_window_stats(limit)).reset_time assert reset_time == pytest.approx(expected_reset_time, abs=0.03) assert (await limiter.get_window_stats(limit)).remaining == 2 # Wait for the next window sleep_time = expected_reset_time - time.time() + sleep_margin time.sleep(sleep_time) # A new hit should be available immediately after window shift # The limiter should reset in a fraction of a period, according to how many hits are in the previous window reset_time = (await limiter.get_window_stats(limit)).reset_time reset_in = reset_time - time.time() assert reset_in == pytest.approx( limit.get_expiry() / previous_window_hits, abs=0.03 ) assert (await limiter.get_window_stats(limit)).remaining == 3 assert await limiter.hit(limit) assert await limiter.hit(limit) for i in range(previous_window_hits): # 
A new item hit should be freed by the previous window t0 = time.time() assert (await limiter.get_window_stats(limit)).remaining == 1 assert await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 0 assert not await limiter.hit(limit) # The previous window has 4 hits. The reset time should be in a 1/4 of the window expiry reset_time = (await limiter.get_window_stats(limit)).reset_time t1 = time.time() reset_in = reset_time - time.time() assert reset_in == pytest.approx( limit.get_expiry() / previous_window_hits - (t1 - t0), abs=0.03 ) # Wait for the next hit available time.sleep(reset_in + sleep_margin) @async_fixed_start async def test_sliding_window_counter_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert (await limiter.get_window_stats(limit)).remaining == 10 assert (await limiter.get_window_stats(limit)).reset_time == pytest.approx( time.time(), 1e-2 ) @async_fixed_start @pytest.mark.flaky async def test_sliding_window_counter_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerMinute(2) if isinstance(storage, TimestampedSlidingWindow): next_second_from_now = ceil(time.time()) assert await limiter.hit(limit, "key") time.sleep(1) assert await limiter.hit(limit, "key") time.sleep(1) assert not await limiter.hit(limit, "key") assert (await limiter.get_window_stats(limit, "key")).remaining == 0 if isinstance(storage, TimestampedSlidingWindow): # With timestamp-based key implementation, # the reset time is periodic according to the worker's timestamp reset_time = (await limiter.get_window_stats(limit, "key")).reset_time expected_reset = int( limit.get_expiry() - (next_second_from_now % limit.get_expiry()) ) assert reset_time - next_second_from_now == pytest.approx( expected_reset, abs=1e-2 ) else: assert ( await limiter.get_window_stats(limit, "key") ).reset_time - time.time() == pytest.approx(58, 1e-2) @async_fixed_start async def test_sliding_window_counter_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerMinute(10, 2) if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.5: time.sleep(ttl) assert not await limiter.hit(limit, "k1", cost=11) assert await limiter.hit(limit, "k2", cost=5) assert (await limiter.get_window_stats(limit, "k2")).remaining == 5 assert not await limiter.test(limit, "k2", cost=6) assert not await limiter.hit(limit, "k2", cost=6) async def test_test_sliding_window_counter(self, uri, args, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerHour(2, 1) limiter = SlidingWindowCounterRateLimiter(storage) assert await limiter.hit(limit) assert await limiter.test(limit) assert await limiter.hit(limit) assert not await limiter.test(limit) assert not await limiter.hit(limit) limits-4.4.1/tests/benchmarks/000077500000000000000000000000001476517132700163155ustar00rootroot00000000000000limits-4.4.1/tests/benchmarks/__init__.py000066400000000000000000000000001476517132700204140ustar00rootroot00000000000000limits-4.4.1/tests/benchmarks/test_storage.py000066400000000000000000000054721476517132700214020ustar00rootroot00000000000000from __future__ import annotations import functools import 
random import pytest import limits.aio.strategies from limits import RateLimitItemPerMinute from limits.storage import storage_from_string from limits.strategies import ( FixedWindowRateLimiter, MovingWindowRateLimiter, SlidingWindowCounterRateLimiter, ) from tests.utils import ( all_storage, async_all_storage, async_moving_window_storage, async_sliding_window_counter_storage, moving_window_storage, sliding_window_counter_storage, ) def hit_window(strategy, storage): limit = RateLimitItemPerMinute(500) uid = int(random.random() * 100) strategy(storage).hit(limit, uid) def hit_window_async(event_loop, strategy, storage): limit = RateLimitItemPerMinute(500) uid = int(random.random() * 100) event_loop.run_until_complete(strategy(storage).hit(limit, uid)) @all_storage @pytest.mark.benchmark(group="fixed-window") def test_fixed_window(benchmark, uri, args, fixture): benchmark( functools.partial( hit_window, FixedWindowRateLimiter, storage_from_string(uri, **args) ) ) @sliding_window_counter_storage @pytest.mark.benchmark(group="sliding-window-counter") def test_sliding_window_counter(benchmark, uri, args, fixture): benchmark( functools.partial( hit_window, SlidingWindowCounterRateLimiter, storage_from_string(uri, **args), ) ) @moving_window_storage @pytest.mark.benchmark(group="moving-window") def test_moving_window(benchmark, uri, args, fixture): benchmark( functools.partial( hit_window, MovingWindowRateLimiter, storage_from_string(uri, **args) ) ) @async_all_storage @pytest.mark.benchmark(group="async-fixed-window") def test_fixed_window_async(event_loop, benchmark, uri, args, fixture): benchmark( functools.partial( hit_window_async, event_loop, limits.aio.strategies.FixedWindowRateLimiter, storage_from_string(uri, **args), ) ) @async_moving_window_storage @pytest.mark.benchmark(group="async-moving-window") def test_moving_window_async(event_loop, benchmark, uri, args, fixture): benchmark( functools.partial( hit_window_async, event_loop, limits.aio.strategies.MovingWindowRateLimiter, storage_from_string(uri, **args), ) ) @async_sliding_window_counter_storage @pytest.mark.benchmark(group="async-sliding-window-counter") def test_sliding_window_counter_async(event_loop, benchmark, uri, args, fixture): benchmark( functools.partial( hit_window_async, event_loop, limits.aio.strategies.SlidingWindowCounterRateLimiter, storage_from_string(uri, **args), ) ) limits-4.4.1/tests/conftest.py000066400000000000000000000223511476517132700164020ustar00rootroot00000000000000from __future__ import annotations import os import platform import socket import time import etcd3 import pymemcache import pymemcache.client import pymongo import pytest import redis import redis.sentinel import valkey def check_redis_cluster_ready(host, port): try: return redis.Redis(host, port).cluster("info")["cluster_state"] == "ok" except Exception: return False def check_redis_auth_cluster_ready(host, port): try: return ( redis.Redis(host, port, password="sekret").cluster("info")["cluster_state"] == "ok" ) except Exception: return False def check_redis_ssl_cluster_ready(host, port): storage_url = ( "rediss://localhost:8301/?ssl_cert_reqs=required" "&ssl_keyfile=./tests/tls/client.key" "&ssl_certfile=./tests/tls/client.crt" "&ssl_ca_certs=./tests/tls/ca.crt" ) try: return ( redis.Redis.from_url(storage_url).cluster("info")["cluster_state"] == "ok" ) except Exception: return False def check_sentinel_ready(host, port): try: return redis.sentinel.Sentinel([(host, port)]).master_for("mymaster").ping() except: # noqa return False def 
check_sentinel_auth_ready(host, port): try: return ( redis.sentinel.Sentinel( [(host, port)], sentinel_kwargs={"password": "sekret"}, password="sekret", ) .master_for("mymaster") .ping() ) except: # noqa return False def check_mongo_ready(host, port): try: pymongo.MongoClient("mongodb://localhost:37017").server_info() return True except: # noqa return False def check_etcd_ready(host, port): try: etcd3.client(host, port).status() return True except: # noqa return False @pytest.fixture(scope="session") def host_ip_env(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect(("10.255.255.255", 1)) ip = s.getsockname()[0] except Exception: ip = "127.0.0.1" finally: s.close() os.environ["HOST_IP"] = str(ip) @pytest.fixture(scope="session") def docker_services(host_ip_env, docker_services): return docker_services def ci_delay(): if os.environ.get("CI") == "True": time.sleep(10) @pytest.fixture(scope="session") def etcd_client(docker_services): docker_services.start("etcd") docker_services.wait_for_service("etcd", 2379, check_etcd_ready) ci_delay() return etcd3.client() @pytest.fixture(scope="session") def redis_basic_client(docker_services): docker_services.start("redis-basic") ci_delay() return redis.StrictRedis("localhost", 7379) @pytest.fixture(scope="session") def redis_uds_client(docker_services): if platform.system().lower() == "darwin": pytest.skip("Fixture not supported on OSX") docker_services.start("redis-uds") ci_delay() return redis.from_url("unix:///tmp/limits.redis.sock") @pytest.fixture(scope="session") def redis_auth_client(docker_services): docker_services.start("redis-auth") ci_delay() return redis.from_url("redis://:sekret@localhost:7389") @pytest.fixture(scope="session") def redis_ssl_client(docker_services): docker_services.start("redis-ssl") storage_url = ( "rediss://localhost:8379/0?ssl_cert_reqs=required" "&ssl_keyfile=./tests/tls/client.key" "&ssl_certfile=./tests/tls/client.crt" "&ssl_ca_certs=./tests/tls/ca.crt" ) ci_delay() return redis.from_url(storage_url) @pytest.fixture(scope="session") def redis_cluster_client(docker_services): docker_services.start("redis-cluster-init") docker_services.wait_for_service("redis-cluster-6", 7006, check_redis_cluster_ready) ci_delay() return redis.cluster.RedisCluster("localhost", 7001) @pytest.fixture(scope="session") def redis_auth_cluster_client(docker_services): docker_services.start("redis-cluster-auth-init") docker_services.wait_for_service( "redis-cluster-auth-3", 8402, check_redis_auth_cluster_ready ) ci_delay() return redis.cluster.RedisCluster("localhost", 8400, password="sekret") @pytest.fixture(scope="session") def redis_ssl_cluster_client(docker_services): docker_services.start("redis-ssl-cluster-init") docker_services.wait_for_service( "redis-ssl-cluster-6", 8306, check_redis_ssl_cluster_ready ) storage_url = ( "rediss://localhost:8301/?ssl_cert_reqs=required" "&ssl_keyfile=./tests/tls/client.key" "&ssl_certfile=./tests/tls/client.crt" "&ssl_ca_certs=./tests/tls/ca.crt" ) ci_delay() return redis.cluster.RedisCluster.from_url(storage_url) @pytest.fixture(scope="session") def redis_sentinel_client(docker_services): docker_services.start("redis-sentinel") docker_services.wait_for_service("redis-sentinel", 26379, check_sentinel_ready) ci_delay() return redis.sentinel.Sentinel([("localhost", 26379)]) @pytest.fixture(scope="session") def redis_sentinel_auth_client(docker_services): docker_services.start("redis-sentinel-auth") docker_services.wait_for_service( "redis-sentinel-auth", 26379, 
check_sentinel_auth_ready
    )
    ci_delay()
    return redis.sentinel.Sentinel(
        [("localhost", 36379)],
        sentinel_kwargs={"password": "sekret"},
        password="sekret",
    )


@pytest.fixture(scope="session")
def memcached_client(docker_services):
    docker_services.start("memcached-1")
    ci_delay()
    return pymemcache.Client(("localhost", 22122))


@pytest.fixture(scope="session")
def memcached_cluster_client(docker_services):
    docker_services.start("memcached-1")
    docker_services.start("memcached-2")
    ci_delay()
    return pymemcache.client.HashClient([("localhost", 22122), ("localhost", 22123)])


@pytest.fixture(scope="session")
def memcached_uds_client(docker_services):
    if platform.system().lower() == "darwin":
        pytest.skip("Fixture not supported on OSX")
    docker_services.start("memcached-uds")
    ci_delay()
    return pymemcache.Client("/tmp/limits.memcached.sock")


@pytest.fixture(scope="session")
def mongodb_client(docker_services):
    docker_services.start("mongodb")
    docker_services.wait_for_service("mongodb", 27017, check_mongo_ready)
    ci_delay()
    return pymongo.MongoClient("mongodb://localhost:37017")


@pytest.fixture(scope="session")
def valkey_basic_client(docker_services):
    docker_services.start("valkey-basic")
    ci_delay()
    return valkey.Valkey("localhost", 12379)


@pytest.fixture(scope="session")
def valkey_cluster_client(docker_services):
    docker_services.start("valkey-cluster-init")
    docker_services.wait_for_service(
        "valkey-cluster-6", 2006, check_redis_cluster_ready
    )
    ci_delay()
    return redis.cluster.RedisCluster("localhost", 2001)


@pytest.fixture
def memcached(memcached_client):
    memcached_client.flush_all()
    return memcached_client


@pytest.fixture
def memcached_uds(memcached_uds_client):
    memcached_uds_client.flush_all()
    return memcached_uds_client


@pytest.fixture
def memcached_cluster(memcached_cluster_client):
    memcached_cluster_client.flush_all()
    return memcached_cluster_client


@pytest.fixture
def redis_basic(redis_basic_client):
    redis_basic_client.flushall()
    return redis_basic_client


@pytest.fixture
def redis_ssl(redis_ssl_client):
    redis_ssl_client.flushall()
    return redis_ssl_client


@pytest.fixture
def redis_auth(redis_auth_client):
    redis_auth_client.flushall()
    return redis_auth_client


@pytest.fixture
def redis_uds(redis_uds_client):
    redis_uds_client.flushall()
    return redis_uds_client


@pytest.fixture
def redis_cluster(redis_cluster_client):
    redis_cluster_client.flushall()
    return redis_cluster_client


@pytest.fixture
def redis_auth_cluster(redis_auth_cluster_client):
    redis_auth_cluster_client.flushall()
    return redis_auth_cluster_client


@pytest.fixture
def redis_ssl_cluster(redis_ssl_cluster_client):
    redis_ssl_cluster_client.flushall()
    return redis_ssl_cluster_client


@pytest.fixture
def redis_sentinel(redis_sentinel_client):
    redis_sentinel_client.master_for("mymaster").flushall()
    return redis_sentinel_client


@pytest.fixture
def redis_sentinel_auth(redis_sentinel_auth_client):
    redis_sentinel_auth_client.master_for("mymaster").flushall()
    return redis_sentinel_auth_client


@pytest.fixture
def mongodb(mongodb_client):
    mongodb_client.limits.windows.drop()
    mongodb_client.limits.counters.drop()
    return mongodb_client


@pytest.fixture
def etcd(etcd_client):
    etcd_client.delete_prefix("limits/")
    return etcd_client


@pytest.fixture
def valkey_basic(valkey_basic_client):
    valkey_basic_client.flushall()
    return valkey_basic_client


@pytest.fixture
def valkey_cluster(valkey_cluster_client):
    valkey_cluster_client.flushall()
    return valkey_cluster_client


@pytest.fixture(scope="session")
def docker_services_project_name():
    return "limits"
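# Hypothetical helper, illustrative only and not referenced by any fixture or
# test: it shows how the storage URIs used across the suite line up with the
# services provisioned above (the ports mirror the fixtures in this file).
def example_backend_smoke_check() -> bool:
    from limits.storage import storage_from_string

    candidates = (
        storage_from_string("memory://"),
        storage_from_string("redis://localhost:7379"),
        storage_from_string("memcached://localhost:22122"),
        storage_from_string("mongodb://localhost:37017/"),
    )
    # Storage.check() reports whether a backend is reachable
    return all(storage.check() for storage in candidates)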
@pytest.fixture(scope="session") def docker_compose_files(pytestconfig): """Get the docker-compose.yml absolute path. Override this fixture in your tests if you need a custom location. """ return ["docker-compose.yml"] limits-4.4.1/tests/integration/000077500000000000000000000000001476517132700165235ustar00rootroot00000000000000limits-4.4.1/tests/integration/test_concurrency.py000066400000000000000000000124041476517132700224670ustar00rootroot00000000000000from __future__ import annotations import asyncio import random import threading import time from contextlib import suppress from uuid import uuid4 import pytest import limits.aio.storage.memory import limits.aio.strategies import limits.strategies from limits.errors import ConcurrentUpdateError from limits.limits import RateLimitItemPerMinute from limits.storage import storage_from_string from limits.storage.base import TimestampedSlidingWindow from tests.utils import ( all_storage, async_all_storage, async_moving_window_storage, async_sliding_window_counter_storage, moving_window_storage, sliding_window_counter_storage, timestamp_based_key_ttl, ) @pytest.mark.integration class TestConcurrency: CONCURRENT_REQUESTS: int = 100 @all_storage def test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.strategies.FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(5) [limiter.hit(limit, uuid4().hex) for _ in range(self.CONCURRENT_REQUESTS)] key = uuid4().hex hits = [] def hit(): time.sleep(random.random() / 1000) with suppress(ConcurrentUpdateError): if limiter.hit(limit, key): hits.append(None) threads = [ threading.Thread(target=hit) for _ in range(self.CONCURRENT_REQUESTS) ] [t.start() for t in threads] [t.join() for t in threads] assert len(hits) == 5 @sliding_window_counter_storage def test_sliding_window_counter(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.strategies.SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerMinute(5) [limiter.hit(limit, uuid4().hex) for _ in range(self.CONCURRENT_REQUESTS)] key = uuid4().hex hits = [] def hit(): time.sleep(random.random() / 1000) if limiter.hit(limit, key): hits.append(None) threads = [ threading.Thread(target=hit) for _ in range(self.CONCURRENT_REQUESTS) ] [t.start() for t in threads] [t.join() for t in threads] assert len(hits) == 5 @moving_window_storage def test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.strategies.MovingWindowRateLimiter(storage) limit = RateLimitItemPerMinute(5) [limiter.hit(limit, uuid4().hex) for _ in range(self.CONCURRENT_REQUESTS)] key = uuid4().hex hits = [] def hit(): time.sleep(random.random() / 1000) if limiter.hit(limit, key): hits.append(None) threads = [ threading.Thread(target=hit) for _ in range(self.CONCURRENT_REQUESTS) ] [t.start() for t in threads] [t.join() for t in threads] assert len(hits) == 5 @pytest.mark.asyncio @pytest.mark.integration class TestAsyncConcurrency: CONCURRENT_REQUESTS: int = 100 @async_all_storage async def test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.aio.strategies.FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(5) [await limiter.hit(limit, uuid4().hex) for _ in range(self.CONCURRENT_REQUESTS)] key = uuid4().hex hits = [] async def hit(): await asyncio.sleep(random.random() / 1000) with suppress(ConcurrentUpdateError): if await limiter.hit(limit, key): hits.append(None) await 
asyncio.gather(*[hit() for _ in range(self.CONCURRENT_REQUESTS)])
        assert len(hits) == 5

    @async_sliding_window_counter_storage
    async def test_sliding_window_counter(self, uri, args, fixture):
        storage = storage_from_string(uri, **args)
        limiter = limits.aio.strategies.SlidingWindowCounterRateLimiter(storage)
        limit = RateLimitItemPerMinute(5)
        if isinstance(storage, TimestampedSlidingWindow):
            # Avoid testing the behaviour when the window is about to be reset
            ttl = timestamp_based_key_ttl(limit)
            if ttl < 1:
                time.sleep(ttl)
        key = uuid4().hex
        hits = []

        async def hit():
            await asyncio.sleep(random.random() / 1000)
            if await limiter.hit(limit, key):
                hits.append(None)

        await asyncio.gather(*[hit() for _ in range(self.CONCURRENT_REQUESTS)])
        assert len(hits) == 5

    @async_moving_window_storage
    async def test_moving_window(self, uri, args, fixture):
        storage = storage_from_string(uri, **args)
        limiter = limits.aio.strategies.MovingWindowRateLimiter(storage)
        limit = RateLimitItemPerMinute(5)
        [await limiter.hit(limit, uuid4().hex) for _ in range(self.CONCURRENT_REQUESTS)]
        key = uuid4().hex
        hits = []

        async def hit():
            await asyncio.sleep(random.random() / 1000)
            if await limiter.hit(limit, key):
                hits.append(None)

        await asyncio.gather(*[hit() for _ in range(self.CONCURRENT_REQUESTS)])
        assert len(hits) == 5

limits-4.4.1/tests/storage/
limits-4.4.1/tests/storage/__init__.py
limits-4.4.1/tests/storage/test_memory.py
from __future__ import annotations

import pickle

from limits.storage import MemoryStorage


class TestSerialization:
    def test_pickle(self):
        storage = MemoryStorage()
        assert 1 == storage.incr("test", 60)
        assert storage.acquire_entry("moving_test", 2, 60)
        dump = pickle.dumps(storage)
        restored = pickle.loads(dump)
        assert 2 == restored.incr("test", 60)
        assert restored.acquire_entry("moving_test", 2, 60)
        assert not restored.acquire_entry("moving_test", 2, 60)

limits-4.4.1/tests/test_limit_granularities.py
from __future__ import annotations

from limits import limits


class TestGranularity:
    def test_seconds_value(self):
        assert limits.RateLimitItemPerSecond(1).get_expiry() == 1
        assert limits.RateLimitItemPerMinute(1).get_expiry() == 60
        assert limits.RateLimitItemPerHour(1).get_expiry() == 60 * 60
        assert limits.RateLimitItemPerDay(1).get_expiry() == 60 * 60 * 24
        assert limits.RateLimitItemPerMonth(1).get_expiry() == 60 * 60 * 24 * 30
        assert limits.RateLimitItemPerYear(1).get_expiry() == 60 * 60 * 24 * 30 * 12

    def test_representation(self):
        assert "1 per 1 second" in str(limits.RateLimitItemPerSecond(1))
        assert "1 per 1 minute" in str(limits.RateLimitItemPerMinute(1))
        assert "1 per 1 hour" in str(limits.RateLimitItemPerHour(1))
        assert "1 per 1 day" in str(limits.RateLimitItemPerDay(1))
        assert "1 per 1 month" in str(limits.RateLimitItemPerMonth(1))
        assert "1 per 1 year" in str(limits.RateLimitItemPerYear(1))

    def test_comparison(self):
        assert limits.RateLimitItemPerSecond(1) < limits.RateLimitItemPerMinute(1)
        assert limits.RateLimitItemPerMinute(1) < limits.RateLimitItemPerHour(1)
        assert limits.RateLimitItemPerHour(1) < limits.RateLimitItemPerDay(1)
        assert limits.RateLimitItemPerDay(1) < limits.RateLimitItemPerMonth(1)
        assert limits.RateLimitItemPerMonth(1) < limits.RateLimitItemPerYear(1)
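# Companion sketch for the granularity tests above (standalone and
# illustrative, not part of the suite): the optional second constructor
# argument multiplies the base granularity, and expiry is always in seconds.
from limits import limits

per_minute = limits.RateLimitItemPerMinute(100)  # 100 per 1 minute
per_two_days = limits.RateLimitItemPerDay(10, 2)  # 10 per 2 days

assert per_minute.get_expiry() == 60
assert per_two_days.get_expiry() == 2 * 24 * 60 * 60
# items order by window size, as in test_comparison above
assert limits.RateLimitItemPerMinute(1) < limits.RateLimitItemPerDay(1)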
limits-4.4.1/tests/test_limits.py
from __future__ import annotations

from collections import defaultdict

from limits import limits


class TestLimits:
    class FakeLimit(limits.RateLimitItem):
        GRANULARITY = limits.Granularity(1, "fake")

    class OtherFakeLimit(limits.RateLimitItem):
        GRANULARITY = limits.Granularity(1, "otherfake")

    def test_key_all_strings_default_namespace(self):
        item = self.FakeLimit(1, 1)
        assert item.key_for("a", "b", "c") == "LIMITER/a/b/c/1/1/fake"

    def test_key_with_none_default_namespace(self):
        item = self.FakeLimit(1, 1)
        assert item.key_for("a", None, None) == "LIMITER/a/None/None/1/1/fake"

    def test_key_with_int_default_namespace(self):
        item = self.FakeLimit(1, 1)
        assert item.key_for("a", 1) == "LIMITER/a/1/1/1/fake"

    def test_key_with_mixed_string_types_default_namespace(self):
        item = self.FakeLimit(1, 1)
        assert item.key_for(b"a", "b") == "LIMITER/a/b/1/1/fake"

    def test_equality(self):
        item = self.FakeLimit(1, 1)
        assert item == self.FakeLimit(1, 1)
        assert item != self.FakeLimit(1, 2)
        assert item != self.FakeLimit(2, 1)
        assert item != "something else"

    def test_hashability(self):
        mapping = defaultdict(lambda: 1)
        mapping[self.FakeLimit(1, 1)] += 1
        mapping[self.FakeLimit(1, 1)] += 1
        mapping[self.FakeLimit(1, 2)] += 1
        mapping[self.FakeLimit(1, 2)] += 1
        mapping[self.OtherFakeLimit(1, 2)] += 1
        assert len(mapping) == 3

limits-4.4.1/tests/test_ratelimit_parser.py
from __future__ import annotations

import pytest

from limits import limits
from limits.util import granularity_from_string, parse, parse_many


class TestRatelimitParser:
    def test_singles(self):
        for rl_string in ["1 per second", "1/SECOND", "1 / Second"]:
            assert parse(rl_string) == limits.RateLimitItemPerSecond(1)
        for rl_string in ["1 per minute", "1/MINUTE", "1/Minute"]:
            assert parse(rl_string) == limits.RateLimitItemPerMinute(1)
        for rl_string in ["1 per hour", "1/HOUR", "1/Hour"]:
            assert parse(rl_string) == limits.RateLimitItemPerHour(1)
        for rl_string in ["1 per day", "1/DAY", "1 / Day"]:
            assert parse(rl_string) == limits.RateLimitItemPerDay(1)
        for rl_string in ["1 per month", "1/MONTH", "1 / Month"]:
            assert parse(rl_string) == limits.RateLimitItemPerMonth(1)
        for rl_string in ["1 per year", "1/Year", "1 / year"]:
            assert parse(rl_string) == limits.RateLimitItemPerYear(1)

    def test_multiples(self):
        assert parse("1 per 3 hour").get_expiry() == 3 * 60 * 60
        assert parse("1 per 2 hours").get_expiry() == 2 * 60 * 60
        assert parse("1/2 day").get_expiry() == 2 * 24 * 60 * 60

    def test_parse_many(self):
        parsed = parse_many("1 per 3 hour; 1 per second")
        assert len(parsed) == 2
        assert parsed[0].get_expiry() == 3 * 60 * 60
        assert parsed[1].get_expiry() == 1

    def test_parse_many_csv(self):
        parsed = parse_many("1 per 3 hour, 1 per second")
        assert len(parsed) == 2
        assert parsed[0].get_expiry() == 3 * 60 * 60
        assert parsed[1].get_expiry() == 1

    @pytest.mark.parametrize("value", [None, "1 per millenium", "meow"])
    def test_invalid_string_parse(self, value):
        with pytest.raises(ValueError):
            parse(value)

    @pytest.mark.parametrize("value", ["millenium", "meow"])
    def test_invalid_string_granularity(self, value):
        with pytest.raises(ValueError):
            granularity_from_string(value)

    @pytest.mark.parametrize(
        "value",
        ["1 per yearl; 2 per decade"],
    )
    def test_invalid_string_parse_many(self, value):
        with pytest.raises(ValueError):
            parse_many(value)
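# Companion sketch for the parser tests above (standalone and illustrative):
# parse() accepts strings of the form "<amount> [per|/] [multiple]
# <granularity>", and parse_many() splits a combined string on ';' or ','.
from limits.util import parse, parse_many

limit = parse("10 per 5 minutes")
assert limit.amount == 10 and limit.get_expiry() == 5 * 60

multi = parse_many("100/hour; 10 per minute")
assert [item.get_expiry() for item in multi] == [3600, 60]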
limits-4.4.1/tests/test_storage.py000066400000000000000000000350311476517132700172570ustar00rootroot00000000000000from __future__ import annotations import time import pytest from pytest_lazy_fixtures import lf from limits import RateLimitItemPerMinute, RateLimitItemPerSecond from limits.errors import ConfigurationError, StorageError from limits.storage import ( EtcdStorage, MemcachedStorage, MemoryStorage, MongoDBStorage, MovingWindowSupport, RedisClusterStorage, RedisSentinelStorage, RedisStorage, SlidingWindowCounterSupport, Storage, storage_from_string, ) from limits.strategies import MovingWindowRateLimiter, SlidingWindowCounterRateLimiter from tests.utils import fixed_start class TestBaseStorage: @pytest.mark.parametrize( "uri, args", [("blah://", {}), ("redis+sentinel://localhost:26379", {})] ) def test_invalid_storage_string(self, uri, args): with pytest.raises(ConfigurationError): storage_from_string(uri, **args) def test_pluggable_storage_fixed_only(self): class MyStorage(Storage): STORAGE_SCHEME = ["mystorage+fixed"] @property def base_exceptions(self): return ValueError def incr(self, key, expiry, elastic_expiry=False): return def get(self, key): return 0 def get_expiry(self, key): return time.time() def reset(self): return def check(self): return def clear(self): return storage = storage_from_string("mystorage+fixed://") assert isinstance(storage, MyStorage) with pytest.raises(NotImplementedError): MovingWindowRateLimiter(storage) with pytest.raises(NotImplementedError): SlidingWindowCounterRateLimiter(storage) def test_pluggable_storage_moving_window(self): class MyStorage(Storage, MovingWindowSupport): STORAGE_SCHEME = ["mystorage+moving"] @property def base_exceptions(self): return ValueError def incr(self, key, expiry, elastic_expiry=False): return def get(self, key): return 0 def get_expiry(self, key): return time.time() def reset(self): return def check(self): return def clear(self): return def acquire_entry(self, *a, **k): return True def get_moving_window(self, *a, **k): return (time.time(), 1) storage = storage_from_string("mystorage+moving://") assert isinstance(storage, MyStorage) MovingWindowRateLimiter(storage) def test_pluggable_storage_sliding_window_counter(self): class MyStorage(Storage, SlidingWindowCounterSupport): STORAGE_SCHEME = ["mystorage+sliding"] @property def base_exceptions(self): return ValueError def incr(self, key, expiry, elastic_expiry=False): return def get(self, key): return 0 def get_expiry(self, key): return time.time() def reset(self): return def check(self): return def clear(self): return def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: pass def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: pass storage = storage_from_string("mystorage+sliding://") assert isinstance(storage, MyStorage) SlidingWindowCounterRateLimiter(storage) @pytest.mark.parametrize( "uri, args, expected_instance, fixture", [ pytest.param("memory://", {}, MemoryStorage, None, id="in-memory"), pytest.param( "redis://localhost:7379", {}, RedisStorage, lf("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "redis+unix:///tmp/limits.redis.sock", {}, RedisStorage, lf("redis_uds"), marks=pytest.mark.redis, id="redis-uds", ), pytest.param( "redis+unix://:password/tmp/limits.redis.sock", {}, RedisStorage, lf("redis_uds"), marks=pytest.mark.redis, id="redis-uds-auth", ), pytest.param( "memcached://localhost:22122", {}, MemcachedStorage, lf("memcached"), 
marks=pytest.mark.memcached, id="memcached", ), pytest.param( "memcached://localhost:22122,localhost:22123", {}, MemcachedStorage, lf("memcached_cluster"), marks=pytest.mark.memcached, id="memcached-cluster", ), pytest.param( "memcached:///tmp/limits.memcached.sock", {}, MemcachedStorage, lf("memcached_uds"), marks=pytest.mark.memcached, id="memcached-uds", ), pytest.param( "redis+sentinel://localhost:26379", {"service_name": "mymaster"}, RedisSentinelStorage, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "redis+sentinel://localhost:26379/mymaster", {}, RedisSentinelStorage, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-service-name-url", ), pytest.param( "redis+sentinel://:sekret@localhost:36379/mymaster", {"password": "sekret"}, RedisSentinelStorage, lf("redis_sentinel_auth"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-auth", ), pytest.param( "redis+cluster://localhost:7001/", {}, RedisClusterStorage, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "redis+cluster://:sekret@localhost:8400/", {}, RedisClusterStorage, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "mongodb://localhost:37017/", {}, MongoDBStorage, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "etcd://localhost:2379", {}, EtcdStorage, lf("etcd"), marks=pytest.mark.etcd, id="etcd", ), ], ) class TestConcreteStorages: def test_storage_string(self, uri, args, expected_instance, fixture): assert isinstance(storage_from_string(uri, **args), expected_instance) @fixed_start def test_expiry_incr(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) storage.incr(limit.key_for(), limit.get_expiry()) time.sleep(1.1) assert storage.get(limit.key_for()) == 0 @fixed_start def test_expiry_acquire_entry(self, uri, args, expected_instance, fixture): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip(f"{expected_instance} does not support acquire entry") storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) assert storage.acquire_entry(limit.key_for(), limit.amount, limit.get_expiry()) time.sleep(1.1) assert storage.get(limit.key_for()) == 0 @fixed_start def test_expiry_acquire_sliding_window_entry( self, uri, args, expected_instance, fixture ): if not issubclass(expected_instance, SlidingWindowCounterSupport): pytest.skip(f"{expected_instance} does not support acquire entry") storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) assert storage.acquire_sliding_window_entry( limit.key_for(), limit.amount, limit.get_expiry() ) assert storage.get_sliding_window(limit.key_for(), limit.get_expiry())[ -1 ] == pytest.approx(2, abs=1e2) def test_incr_custom_amount(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(1) assert 1 == storage.incr(limit.key_for(), limit.get_expiry(), amount=1) assert 11 == storage.incr(limit.key_for(), limit.get_expiry(), amount=10) def test_acquire_entry_custom_amount(self, uri, args, expected_instance, fixture): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip(f"{expected_instance} does not support acquire entry") storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(10) assert not storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=11 ) assert 
storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=1 ) assert not storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=10 ) def test_storage_check(self, uri, args, expected_instance, fixture): assert storage_from_string(uri, **args).check() def test_storage_reset(self, uri, args, expected_instance, fixture): if expected_instance == MemcachedStorage: pytest.skip("Reset not supported for memcached") limit1 = RateLimitItemPerMinute(10) # default namespace, LIMITER limit2 = RateLimitItemPerMinute(10, namespace="OTHER") storage = storage_from_string(uri, **args) for i in range(10): storage.incr(limit1.key_for(str(i)), limit1.get_expiry()) storage.incr(limit2.key_for(str(i)), limit2.get_expiry()) assert storage.reset() == 20 def test_storage_clear(self, uri, args, expected_instance, fixture): limit = RateLimitItemPerMinute(10) storage = storage_from_string(uri, **args) storage.incr(limit.key_for(), limit.get_expiry()) assert 1 == storage.get(limit.key_for()) storage.clear(limit.key_for()) assert 0 == storage.get(limit.key_for()) @pytest.mark.parametrize("wrap_exceptions", (True, False)) class TestStorageErrors: class MyStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport): STORAGE_SCHEME = ["mystorage"] class MyError(Exception): pass @property def base_exceptions(self): return self.MyError def incr(self, key, expiry, elastic_expiry=False, amount=1): raise self.MyError() def get(self, key): raise self.MyError() def get_expiry(self, key): raise self.MyError() def reset(self): raise self.MyError() def check(self): raise self.MyError() def clear(self, key): raise self.MyError() def acquire_entry(self, key, limit, expiry, amount=1): raise self.MyError() def get_moving_window(self, key, limit, expiry): raise self.MyError() def acquire_sliding_window_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: raise self.MyError() def get_sliding_window( self, key: str, expiry: int ) -> tuple[int, float, int, float]: raise self.MyError() def assert_exception(self, exc, wrap_exceptions): if wrap_exceptions: assert isinstance(exc, StorageError) assert isinstance(exc.storage_error, self.MyStorage.MyError) else: assert isinstance(exc, self.MyStorage.MyError) def test_incr_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).incr("", 1) self.assert_exception(exc.value, wrap_exceptions) def test_get_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).get("") self.assert_exception(exc.value, wrap_exceptions) def test_get_expiry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).get_expiry("") self.assert_exception(exc.value, wrap_exceptions) def test_reset_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).reset() self.assert_exception(exc.value, wrap_exceptions) def test_check_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).check() self.assert_exception(exc.value, wrap_exceptions) def test_clear_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).clear("") self.assert_exception(exc.value, wrap_exceptions) def test_acquire_entry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: 
self.MyStorage(wrap_exceptions=wrap_exceptions).acquire_entry("", 1, 1) self.assert_exception(exc.value, wrap_exceptions) def test_get_moving_window_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).get_moving_window("", 1, 1) self.assert_exception(exc.value, wrap_exceptions) def test_acquire_sliding_entry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage( wrap_exceptions=wrap_exceptions ).acquire_sliding_window_entry("", 1, 1) self.assert_exception(exc.value, wrap_exceptions) def test_get_sliding_window_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).get_sliding_window("", 1) self.assert_exception(exc.value, wrap_exceptions) limits-4.4.1/tests/test_strategy.py000066400000000000000000000366261476517132700174700ustar00rootroot00000000000000from __future__ import annotations import time from math import ceil import pytest from limits.limits import ( RateLimitItemPerHour, RateLimitItemPerMinute, RateLimitItemPerSecond, ) from limits.storage import storage_from_string from limits.storage.base import TimestampedSlidingWindow from limits.strategies import ( FixedWindowElasticExpiryRateLimiter, FixedWindowRateLimiter, MovingWindowRateLimiter, SlidingWindowCounterRateLimiter, ) from tests.utils import ( all_storage, fixed_start, moving_window_storage, sliding_window_counter_storage, timestamp_based_key_ttl, window, ) @all_storage class TestFixedWindow: @fixed_start def test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) with window(1) as (start, end): assert all([limiter.hit(limit) for _ in range(0, 10)]) assert not limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 0 assert limiter.get_window_stats(limit).reset_time == pytest.approx( start + 2, 1e-2 ) @fixed_start def test_fixed_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert limiter.get_window_stats(limit).remaining == 10 assert limiter.get_window_stats(limit).reset_time == pytest.approx( time.time(), 1e-2 ) @fixed_start def test_fixed_window_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(10, 2) assert not limiter.hit(limit, "k1", cost=11) assert limiter.hit(limit, "k2", cost=5) assert limiter.get_window_stats(limit, "k2").remaining == 5 assert not limiter.test(limit, "k2", cost=6) assert not limiter.hit(limit, "k2", cost=6) @fixed_start def test_fixed_window_with_elastic_expiry(self, uri, args, fixture): storage = storage_from_string(uri, **args) with pytest.warns(DeprecationWarning): limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) with window(1) as (start, end): assert all([limiter.hit(limit) for _ in range(0, 10)]) assert not limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 0 assert limiter.get_window_stats(limit).reset_time == pytest.approx( start + 2, 1e-2 ) with window(3) as (start, end): assert not limiter.hit(limit) assert limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 9 assert limiter.get_window_stats(limit).reset_time == pytest.approx( end + 2, 1e-2 ) @fixed_start def 
test_fixed_window_with_elastic_expiry_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) with pytest.warns(DeprecationWarning): limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not limiter.hit(limit, "k1", cost=11) with window(0) as (start, end): assert limiter.hit(limit, "k2", cost=5) assert limiter.get_window_stats(limit, "k2").remaining == 5 assert limiter.get_window_stats(limit, "k2").reset_time == pytest.approx( end + 2, 1e-2 ) assert not limiter.hit(limit, "k2", cost=6) @fixed_start @pytest.mark.flaky def test_test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerHour(2, 1) assert limiter.hit(limit) assert limiter.test(limit) assert limiter.hit(limit) assert not limiter.test(limit) assert not limiter.hit(limit) @sliding_window_counter_storage class TestSlidingWindow: @fixed_start def test_sliding_window_counter(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) if isinstance(storage, TimestampedSlidingWindow): next_second_from_now = ceil(time.time()) if next_second_from_now % 2 == 0: # Next second is even, so the curent one is odd. # Must wait a full period for memcached. time.sleep(1) next_second_from_now = ceil(time.time()) with window(1) as (start, end): assert all([limiter.hit(limit) for _ in range(0, 10)]) assert not limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 0 if isinstance(storage, TimestampedSlidingWindow): # If the key is timestamp-based, the reset time is periodic according to the worker's timestamp reset_time = limiter.get_window_stats(limit).reset_time expected_reset = int( limit.get_expiry() - (next_second_from_now % limit.get_expiry()) ) assert reset_time - next_second_from_now == pytest.approx( expected_reset, abs=1e-2 ) else: assert limiter.get_window_stats(limit).reset_time == pytest.approx( start + 2, 1e-2 ) @pytest.mark.flaky def test_sliding_window_counter_total_reset(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) multiple = 10 period = 1 limit = RateLimitItemPerSecond(multiple, period) assert limiter.get_window_stats(limit).remaining == multiple if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.5: time.sleep(ttl) assert limiter.hit(limit, cost=multiple) assert not limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 0 time.sleep(period * 2) assert limiter.get_window_stats(limit).remaining == multiple assert limiter.get_window_stats(limit).reset_time == pytest.approx( time.time(), abs=1e-2 ) def test_sliding_window_counter_current_window(self, uri, args, fixture): """Check the window stats when only the current window is filled""" storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerHour(2, 24) if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.5: time.sleep(ttl) assert limiter.hit(limit) now = time.time() if isinstance(storage, TimestampedSlidingWindow): expected_reset_time = now + timestamp_based_key_ttl(limit, now) else: expected_reset_time = now + 24 * 3600 assert 
limiter.get_window_stats(limit).reset_time == pytest.approx( expected_reset_time, 1e-2 ) assert limiter.get_window_stats(limit).remaining == 1 assert limiter.hit(limit) assert not limiter.hit(limit) @pytest.mark.flaky(max_runs=3) def test_sliding_window_counter_previous_window(self, uri, args, fixture): """Check the window stats when the previous window is partially filled""" storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) period = 1 limit = RateLimitItemPerSecond(5, period) sleep_margin = 0.001 if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.3: time.sleep(ttl + sleep_margin) previous_window_hits = 3 for i in range(previous_window_hits): limiter.hit(limit) now = time.time() # Check the stats: only the current window is filled assert limiter.get_window_stats(limit).remaining == 2 if isinstance(storage, TimestampedSlidingWindow): expected_reset_time = now + timestamp_based_key_ttl(limit, now) else: expected_reset_time = now + period assert limiter.get_window_stats(limit).reset_time == pytest.approx( expected_reset_time, 1e-2 ) # Wait for the next window sleep_time = expected_reset_time - time.time() + sleep_margin time.sleep(sleep_time) # A new hit should be available immediately after window shift # The limiter should reset in a fraction of a period, according to how many hits are in the previous window reset_time = limiter.get_window_stats(limit).reset_time reset_in = reset_time - time.time() assert reset_in == pytest.approx( limit.get_expiry() / previous_window_hits, abs=0.03 ) assert limiter.get_window_stats(limit).remaining == 3 assert limiter.hit(limit) assert limiter.hit(limit) for i in range(previous_window_hits): # One more hit should be freed up by the previous window t0 = time.time() assert limiter.get_window_stats(limit).remaining == 1 assert limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 0 assert not limiter.hit(limit) # The previous window has 4 hits.
The reset time should be within 1/4 of the window expiry reset_time = limiter.get_window_stats(limit).reset_time t1 = time.time() reset_in = reset_time - time.time() assert reset_in == pytest.approx( limit.get_expiry() / previous_window_hits - (t1 - t0), abs=0.03 ) # Wait for the next hit to become available time.sleep(reset_in + sleep_margin) @fixed_start def test_sliding_window_counter_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert limiter.get_window_stats(limit).remaining == 10 assert limiter.get_window_stats(limit).reset_time == pytest.approx( time.time(), 1e-2 ) @fixed_start def test_sliding_window_counter_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerMinute(10, 2) if isinstance(storage, TimestampedSlidingWindow): # Avoid testing the behaviour when the window is about to be reset ttl = timestamp_based_key_ttl(limit) if ttl < 0.5: time.sleep(ttl) assert not limiter.hit(limit, "k1", cost=11) assert limiter.hit(limit, "k2", cost=5) assert limiter.get_window_stats(limit, "k2").remaining == 5 assert not limiter.test(limit, "k2", cost=6) assert not limiter.hit(limit, "k2", cost=6) @fixed_start @pytest.mark.flaky def test_test_sliding_window_counter(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = SlidingWindowCounterRateLimiter(storage) limit = RateLimitItemPerHour(2, 1) assert limiter.hit(limit) assert limiter.test(limit) assert limiter.hit(limit) assert not limiter.test(limit) assert not limiter.hit(limit) @moving_window_storage class TestMovingWindow: def test_moving_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert limiter.get_window_stats(limit).remaining == 10 assert limiter.get_window_stats(limit).reset_time == pytest.approx( time.time() + 2, 1e-2 ) def test_moving_window_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerMinute(2) assert limiter.hit(limit, "key") time.sleep(1) assert limiter.hit(limit, "key") time.sleep(1) assert not limiter.hit(limit, "key") assert limiter.get_window_stats(limit, "key").remaining == 0 assert limiter.get_window_stats( limit, "key" ).reset_time - time.time() == pytest.approx(58, 1e-2) def test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) # 5 hits in the first 500ms with window(0.5): assert all(limiter.hit(limit) for i in range(5)) # 5 hits in the last 700ms with window(2, delay=1.3): assert all(limiter.hit(limit) for i in range(5)) # 11th fails assert not limiter.hit(limit) def test_moving_window_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not limiter.hit(limit, "k1", cost=11) # 5 hits in the first 100ms with window(0.1): limiter.hit(limit, "k2", cost=5) # 5 hits in the last 200ms with window(2, delay=1.8): assert all(limiter.hit(limit, "k2") for i in range(4)) assert not limiter.test(limit, "k2", cost=2) assert not limiter.hit(limit, "k2", cost=2) assert limiter.hit(limit, "k2") # 5 more succeed since there were only 5 in the last 2
seconds assert all([limiter.hit(limit, "k2") for i in range(5)]) assert limiter.get_window_stats(limit, "k2")[1] == 0 assert not limiter.hit(limit, "k2", cost=2) def test_moving_window_varying_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) five_per_min = RateLimitItemPerMinute(5) limiter.hit(five_per_min, cost=5) assert not limiter.hit(five_per_min, cost=2) limiter.clear(five_per_min) assert limiter.hit(five_per_min) def test_moving_window_huge_cost_sync(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) many_per_min = RateLimitItemPerMinute(1_000_000) limiter.hit(many_per_min, cost=1_000_000) assert not limiter.hit(many_per_min, cost=2) limiter.clear(many_per_min) assert limiter.hit(many_per_min) def test_test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerHour(2, 1) limiter = MovingWindowRateLimiter(storage) assert limiter.hit(limit) assert limiter.test(limit) assert limiter.hit(limit) assert not limiter.test(limit) assert not limiter.hit(limit) limits-4.4.1/tests/test_utils.py000066400000000000000000000016301476517132700167510ustar00rootroot00000000000000from __future__ import annotations import pytest from packaging.version import Version from limits.errors import ConfigurationError from limits.util import LazyDependency def test_lazy_dependency_found(): class Demo(LazyDependency): DEPENDENCIES = ["redis"] d = Demo() assert d.dependencies["redis"].version_found def test_lazy_dependency_version_low(): class Demo(LazyDependency): DEPENDENCIES = { "redis": Version("999.999"), "maythisneverexist": Version("1.0"), } d = Demo() with pytest.raises( ConfigurationError, match="minimum version of 999.999 for 'redis' could not be found", ): assert d.dependencies["redis"].version_found with pytest.raises( ConfigurationError, match="'maythisneverexist' prerequisite not available" ): assert d.dependencies["maythisneverexist"].version_found limits-4.4.1/tests/tls/000077500000000000000000000000001476517132700150025ustar00rootroot00000000000000limits-4.4.1/tests/tls/ca.crt000066400000000000000000000036441476517132700161060ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFdzCCA1+gAwIBAgIUEXC8MFUMyWxpXqJvl+JLi/KXpckwDQYJKoZIhvcNAQEL BQAwNTETMBEGA1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUg QXV0aG9yaXR5MB4XDTI0MTEyODIxMTQwN1oXDTM0MTEyNjIxMTQwN1owNTETMBEG A1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5 MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAzcqmRj5FJYesWZs0/8Aw NKhdK5fcYF/3Rycvr+cB7W8IHyEgWYIXIkbWeS4shzphZG9dUs7DidZv8QtC12PW aXdEwS54PLzSreaWiEPZwSaV/Ia5a/A9JmddFACIyWBjt2Fzi4UlkwIxmRtbv1IQ zmkIa0vRtpzKucmhl2hgj/oFpN0Md9R66QLGRx3n3wX6bXKvCemPxASI/Nqrih60 ZJK91enRD354MSOTDQ2tFZ269uW2UtZAjOYUQHQ5InoU4fSqsdAgRdU+/wJV/ss7 ONQh79q9zGs8QmonXneAKI3xQCAOK2qyyUdlBMqnoeF+4TRx/eHzR1FNyqSPO+BD N9YRutjJ/Ydwg8fZIKkY1fTshD10yQkmTrUQ25LqeeGrRXg218xY5naY3DDUyzyD qGTjJsqChJSs5Q9OkR7+jSTMFe8LvVfd8iLSaemCCiZ6b/GZFzRTcSsHMg7Zl7ae cArVbP81gj00HDiFmyUstlxdpYqlRifmx1sv7cEUJt8PnnlPxtXIuJOuOz9Fe497 wSYdjPul6jX+dSS9zTjv5j/tQNOovY23obhxh3A4NrOVisNj/Rt+PNT2F4UbzuQ/ Oi4PS+FSALs+YmyW9T99o0SvhxjBccslBGOn+u1zRdrpeoCi/8o1nWaeg2JNeUGG DphalU81McRcmtGJqs3bvIECAwEAAaN/MH0wHQYDVR0OBBYEFAwEX75Bhlwn5xys MWNESWgpeA75MB8GA1UdIwQYMBaAFAwEX75Bhlwn5xysMWNESWgpeA75MA8GA1Ud EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgG2MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr BgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAgEAnOAkQWW4rne99qj1fuJ2yXU1ti/T 
xkEjkOhFCWAToH+P9BntaFLt39ykBp9Z9T/tn0Jd+akHuM+8eMJqDtNsSB/fpg+5 Kz/wzXpXqiLvnFu9o3Y2cmC4G+Vz9Ht4TuxNKf3+T77cpLsI80rHF26DpYA5NRO+ h85xpIeOg1S5fMd6pMYwJFO/j2YWdjJbW89tqEmaDx3ruaTa0Sdj++oGpgEvBNre 3czBGjpK/N3w2nPfRF9IZGGeGdnbh4sc2eZ7BpZruu16KoFElvITbxELcpiFV+0A fwB2YZRitY/N9rxToBiTP8WWVIgnUh0qP2Q6nIgAI1gds8VD/z+pExu40eKJNsjy RojtvsQ+C0g/t5Sb5k9654Br6gHGgRz+dp0cybp5Au5zTH5qj2vMJJ6sK40pHL7W UxyY1beI6gyX1M33sqhweXrViwCAjLsII68UOQVZu4a5mY11IqEOVc3O0haV2AYB q9oPx5UJMrHiQ7ma52RAkJwYvkDCtf6FYkeeSjCB+3JHImRiBgUr4AnAA0112m1E M5wfoFFoBXg00gdulZAhJbqXye4GS0c7GrnNJ+k+oolPsPmEBjptkowk+cWthBZm hGCUoJVkjshUj6/+AsxMcusnbvaAkn9C6LE90ORMSBEWSd8EQM2B7Zs6pYVKAN+v TIeWPUgHb6JpZaU= -----END CERTIFICATE----- limits-4.4.1/tests/tls/ca.key000066400000000000000000000063101476517132700160770ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDNyqZGPkUlh6xZ mzT/wDA0qF0rl9xgX/dHJy+v5wHtbwgfISBZghciRtZ5LiyHOmFkb11SzsOJ1m/x C0LXY9Zpd0TBLng8vNKt5paIQ9nBJpX8hrlr8D0mZ10UAIjJYGO3YXOLhSWTAjGZ G1u/UhDOaQhrS9G2nMq5yaGXaGCP+gWk3Qx31HrpAsZHHeffBfptcq8J6Y/EBIj8 2quKHrRkkr3V6dEPfngxI5MNDa0Vnbr25bZS1kCM5hRAdDkiehTh9Kqx0CBF1T7/ AlX+yzs41CHv2r3MazxCaided4AojfFAIA4rarLJR2UEyqeh4X7hNHH94fNHUU3K pI874EM31hG62Mn9h3CDx9kgqRjV9OyEPXTJCSZOtRDbkup54atFeDbXzFjmdpjc MNTLPIOoZOMmyoKElKzlD06RHv6NJMwV7wu9V93yItJp6YIKJnpv8ZkXNFNxKwcy DtmXtp5wCtVs/zWCPTQcOIWbJSy2XF2liqVGJ+bHWy/twRQm3w+eeU/G1ci4k647 P0V7j3vBJh2M+6XqNf51JL3NOO/mP+1A06i9jbehuHGHcDg2s5WKw2P9G3481PYX hRvO5D86Lg9L4VIAuz5ibJb1P32jRK+HGMFxyyUEY6f67XNF2ul6gKL/yjWdZp6D Yk15QYYOmFqVTzUxxFya0Ymqzdu8gQIDAQABAoICABZHzAPL5yGelE7pSm4sKSvy VJANM4ttqHCYMgYJ7T//JvJz3DRErOuFnt/k22ZSsEu28PQvtaT9RgZjcTuvr/2g L534wYTV7rtsPT0Il5Fdc5yxG28vxfcb9sxdGHIxEOrATsm6wyws7yDEAQVwRaiL X6F6RnzOcfZ2eZvSba345FaKpjhtFVeX+M6nUrznQUtLQN9CXdHbqdcyd6MPGvnO l2YLGBrk1u9/CgM+SVCPULg9e6VxU/DNUkD+NFdiv1MNcdOplGzrHxRt9NCOwV0w PnJhDO6OZ6pLIvML3iDF6WpHbjFOLogzRiEkLc0NhHaufm794SCGEXWQpn9VkwFN UvPmDQiIlVsWBwumGUxy5bebYo/325fVoCMOQ1YBsEaPM1rFKlLBfyyNBI2CL3Wj MQ11NzInE8VMu830dYrnnd7AXwRsBJGW5wRmel85a2kRqSRkPUMQvqoqc9TgJG1d LMPtWQ4HUA4LEpQIAMqKHM8Ww9xLwBTOWjcZTz8Pc6HyWbnlskOgNCpfCEy6XLEo eIUZTZz3i2PLr36/rj5YTvslAvATu8cmJ61hRjDpCYOHbtiD/i8WDNFiMtasKTi2 bGncmwsHbhk6i2tJcNcdDD9nlEVl6YVS+Yiivs18CQJQz6qBA0UZjmF+bjbMA6Yb XuDTBqGr7KVoSNazWrYpAoIBAQDZnyGHU2j/4gTCnEWylCwN8q4cQ/YRVUNi7t6X R/3f/aoU4MB9XTqJABrz8mq7ssCnunEokiHBkV2o8e70dUmVrhySeAoXPUMGHu+3 JWfLge1PjeQ5qrSPqvxVK1J/37xIvB+/XnLXXkpIh8gF0rSgeS20Ii7MpLwuHSMu zi/tfijVH+VVNZpz8cnOAApqruPjYpMZsyl6nf0ik5fmqMjzOc/4goqhD4HMp9WR z1otNOuQ4oQyVAKwewyB8DzuXUux0qX2kKnqTjJMXusH7B5pvmDWnw10MkbArkoj nOxfP07GIE1bWUz1kFUZniORWEj8yof0EQACx3mNeDKRI7P9AoIBAQDyFW8yE6Xh IJMjihzSAgmF6rgXC93CM18ERl4X/zK/MSkuH2DD27nu7yUP48h5kLVrxohX71zO vqW+B9KOkN0NYmctkvfR2MiH7LTA4geegZNaZp84lWMl8z2auZHBv0d7Fx2DM1rv NCwhgX30Xp+VfdoBvJ4JQR8hWM4EG9jK1IjRbJM/zoKtiO/3EJg2MX0MsgIGXNDj xc2RTHMnoegnL1ILcDG1qMKkA7oYuqV1tinMJ0/uPrL/2H2U0F3YFb7yGmYOXmxS Acn2YTZdcm9NTM91QJKx3ugeu26SsUUWDEHsexvndqgxST2bOgPVk6xZ/UHDt/Ma RZTUgKFXrFfVAoIBAQCWQ26fRlTF+oZW6himCfGxX9Qg6DxME5eIBG/l/Wuq8jgy Wk4PQgRc/Jmfv886815bcj0YSm7MfVcyhHr+iM3acHKCICi3izFwVIQo7ccRmhEG UW848kniuamqFVPNv+r+Pged19VsKEsacccHXkoiVAney4uM+mKFZNEaTHyDfA0t u+xaPbz8OgOiHh0TNNdd9n6jzDXj2Jsk+Mx2FC1M5KpZaanXNCHn8ithls8pT9Uo IYAuUBvbR9uHDR7FZ94QVucJrTHl+0B3w0J+IeeOFEpBZ2mexehcuvuuQ3hi/A+k dkFzxeGB3bslCTB993vOMPpW6DYdtSaVLF2eIgzxAoIBAQCkBJfBMwS5omtusd3S /+RZ4mz12GlYcZvX9qE72wET/wfqowHZ0Z6JX2gZ+vfSPZRdwDaSda/It+zt8rjb NRVAQ0Kq4x+bAyjixyK0aLBG2X3vywXnvXdeuA4uLbqurZ2NrVymjv32gb/At86R 
6iZvj8RMVvIwTPn9iwCSUhexXQyR7+FYYt+xGc2T9J9cYG1cU6MBkpNJL7SW1FhV KuJy4iHRdCOJ9BxkKc5FgPgn59Mr6hrCzaJTq+dyEzbx99HWkMJCzADSEzo1/ylp aOY2VmtdAvIhJ2jXoaBKLWckiAuGYObIDWHperkN2eHNK++jTO7o31ILo8cxrzEh iLg9AoIBADSBU7pCZEIvmM69R0R8c1ZFZzLjy+4iFWeTkQr8l+XzSdAALK8IhHsG 2JRTYAlXjHJyZigCTYCIcI8qIcAnxphFa1IB351Z9Xbn1EYGlA8b/TTk5lEG3sSf 5IkcMcpZtEy0JBzd5GKN+hTQGO4/bLwx9UC4FXzOo2LwhwFzsbR0GQ17aGisclFV f8rOPIopGJ0s5TU8AglWZl3ezSmkEhIrvuyQXLzvBfiGhNqMiLPMx5KyqttM2QgC DSWI5wp3ja3L4acpOzVF9XndsSlnRqmot31g0VtWBPSHkq/y6fkUHOX1IuYwjD4P 8Q5HHB4sS/AyhVx3WB5Sifo8GLusFss= -----END PRIVATE KEY----- limits-4.4.1/tests/tls/ca.txt000066400000000000000000000000511476517132700161220ustar00rootroot0000000000000035EF919824B8DFBE68EC6860AB34C681883BCD23 limits-4.4.1/tests/tls/client.crt000066400000000000000000000030261476517132700167730ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIEUDCCAjigAwIBAgIUewkZ5tzFxJ2ElG5DuKGqO4okTi0wDQYJKoZIhvcNAQEL BQAwNTETMBEGA1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUg QXV0aG9yaXR5MB4XDTI0MTEyODIxMTQwOFoXDTI1MTEyODIxMTQwOFowKzETMBEG A1UECgwKUmVkaXMgVGVzdDEUMBIGA1UEAwwLQ2xpZW50LW9ubHkwggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQC26ylZru45zFlNWIGfpJbfxrHU6gwI2hWk 6CVTgxkr0yIgPXNNScrgAQQFxXn4vy1wmItCXyuAkOlllEpCqEmvppJzB9DvtJmg ybiczCMr+ati3ZEUcZzlIpNHZXEMXQkt4Gmw7afC7RaqW0infRcJyZwP2U4tFxqt MMMpI6ikWznus8uenCXptBe0/oTE5XhzRLvCUYKsiihomSlAHE2fhbPsPd81E+cq 7JPmWkqfH9hbS7qD01/bXiImIBAS3lK6mtoKW9YoDKWyjvzNSkFcQaDWyko8UMf0 p0G7ZFnLR3nVIG9ZzDAMVUpTFuyGdcCpd5NNP4ow51jJ4hYnM6DhAgMBAAGjYjBg MAsGA1UdDwQEAwIFoDARBglghkgBhvhCAQEEBAMCB4AwHQYDVR0OBBYEFD2OYj1B qRSzIYr9GmBA3PPEkIkvMB8GA1UdIwQYMBaAFAwEX75Bhlwn5xysMWNESWgpeA75 MA0GCSqGSIb3DQEBCwUAA4ICAQAS0Y4QV7EKcJv5BfOe+T6qu1IAlNv0CWLFbrCH uohrIAD0oxPLfjVBYiBHWaTS8qzyhbGAa96CaB/sUhaX7ecvVld5OXxKZIkkG5pw KpF5Xd2Tox877V4BFKwfpkyFV7mILNKkLfIk+9s4nN7fA5kTdEZlcUJ+cRcjbqSA DliD8STRTn5htiEcgOEEnXbQxHgMKrxOC0Df7KItrUKAsVUEldcAvyIJs+pztagY SYPrlPp4yb/ERQZkIJKGtRwNmqV9UIHhJwMnCZXBzUSm0YI8i57BXkX4wqijbFaS OJ2mSMJn66AMMblTLQgQFeT34Ke484K+TMkJWkXku68PHr4YlCHTaRHpqMzmqXhw ixlBmior4WvroYeSjib5uKcFlnsKGpN5BWLUMGevc60s1PJCBnGlJGTmJqPC2fHo aoyvguZupmkr3QdH4zLFQ8A+7oeuCFbqe5ZQX5AHL4/0uIGLtz9kxr+KayEWf80w T6mLHeUyg++UMR3Z/AhJfUXHc1GM2TbD4ZWDWmS0fs/OknKmyV+Wx7lLUlyka1Pw QoPAjTqJyGzU8SuivYogrGNDCun7dq2FfYPbCnrahm0OunNI4BFpbUTjbDfkENml IMJ9QgaDN/3gVDGyjsEZzdnU2F/CHtkIATrVwAXVpFnupD8fxWipJuDMGK3ni3Ks 2w7eXQ== -----END CERTIFICATE----- limits-4.4.1/tests/tls/client.key000066400000000000000000000032501476517132700167720ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC26ylZru45zFlN WIGfpJbfxrHU6gwI2hWk6CVTgxkr0yIgPXNNScrgAQQFxXn4vy1wmItCXyuAkOll lEpCqEmvppJzB9DvtJmgybiczCMr+ati3ZEUcZzlIpNHZXEMXQkt4Gmw7afC7Raq W0infRcJyZwP2U4tFxqtMMMpI6ikWznus8uenCXptBe0/oTE5XhzRLvCUYKsiiho mSlAHE2fhbPsPd81E+cq7JPmWkqfH9hbS7qD01/bXiImIBAS3lK6mtoKW9YoDKWy jvzNSkFcQaDWyko8UMf0p0G7ZFnLR3nVIG9ZzDAMVUpTFuyGdcCpd5NNP4ow51jJ 4hYnM6DhAgMBAAECggEABPXAoopW3TGFwg49FOgzNiCuvZxWFLRd2Lb5x3dbKqAi 7AG2kMPdkNro3MOI0JOrMViAPYhpRyccHmbEZ7p6pbJsL1so0Xh8dPVwqyxxj2BJ rdiJC1H0WZXpLUsFIyXFBAiH3SYTQeVzaKL+h+92p+DeuY2ggXeqVRlo0AKumzxD 8pvl1tOUfVTDbehIFy5kGdqDc9XNPQovCC25y8WP15ChMbQO/vb4KWQ5zF1c5jxc h+eMcVln+qFKM8+p08FstCRfSdzPl2h0MYRbQGXjBMv/Wz/XOMwYritC9bEEgL3J M1PEWuNERHvd/0J7zVa/RJn+w7AcNhXXaX31CH7TVQKBgQDNP3bHZYev+GZWfw/4 aUlrBIaBLLISic+0FiMWqyINTcoOG0IT8ZG0yKgtQQk5kcwpdgj8vqaL/9ioo57F YKyo6Utdo//yu/LfjvVj30h/qLKOcwPwHPKRTZJ8xsgRztjoOE3+4y6q406ioq90 6t3KtGKX3cCQpu4rOrP9FsdszQKBgQDkJjcOwTmIn2hFhpjSte9+D7WgGx1FyC0Z 
0IiKGdqaPXkg4F+JSFbm1Zm3BzQlMZdsWew+2OuqtlQA4lV0aXs40eNGDorVVU8r ERHPRjitl1JOV3Qfwoai8drN3qg73z4vr6BE++v/F4D2KE9ODJMbhI9jqGqRvgSh fRJjMByEZQKBgGCiaMIow2HOiiBfYp2KtM52Kv3WlWC1Ed2wLoUVvcqVvXQ20T2i lv+PGd90p2s7gexjbofRG4j+xHthzfcQNTqx3JISDQe0Bf3LyS1kizXPr4HyFQCQ IITZCQdGcBwyT13GZLKFNAuxSey4u/NIQXSWyXTHK0rvzWL9GJEokGxhAoGBAJnX qwioAsO9ufIn8KUlSXkX3sE3h2WmtcLSez+kb2PST/t7UPChdvfw0NY6ZU8vQYwu lr9KRp+2kq6F8FmvRwfpf/edlsCD5f6EZB4Am/dyMm3hSsyfv2ng7Eol+gtlXemJ 4+igd2Jr1uomBFP85rWHjakFx9a+6yB51gxMRO6BAoGBALMi8f/p0PyUnn7pKrcc 1mA9dn8bvpGOmbFCRDnLXb+zkR695tcSsyeeTvEl1Tdc4mcBk1P3iLYIXvWFhtYL CFRm89SRD/xpvfVqI1pPX+sOrJPOA3KEHjwNzVnlzl4gyck8EMmxXXrKdV7qK+LP agrZ1S4R5RDkbAhslDjck/IJ -----END PRIVATE KEY----- limits-4.4.1/tests/tls/openssl.cnf000066400000000000000000000002431476517132700171540ustar00rootroot00000000000000[ server_cert ] keyUsage = digitalSignature, keyEncipherment nsCertType = server [ client_cert ] keyUsage = digitalSignature, keyEncipherment nsCertType = client limits-4.4.1/tests/tls/redis.crt000066400000000000000000000026071476517132700166270ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIID6DCCAdACFAOcIgSW2heR/Wz1MomoL5bMlVOcMA0GCSqGSIb3DQEBCwUAMDUx EzARBgNVBAoMClJlZGlzIFRlc3QxHjAcBgNVBAMMFUNlcnRpZmljYXRlIEF1dGhv cml0eTAeFw0yNDExMjgyMTE0MDhaFw0yNTExMjgyMTE0MDhaMCwxEzARBgNVBAoM ClJlZGlzIFRlc3QxFTATBgNVBAMMDEdlbmVyaWMtY2VydDCCASIwDQYJKoZIhvcN AQEBBQADggEPADCCAQoCggEBAPpboc5uq+nSgNAZgSGIigxDhSw8hvxgYmVjUtkP KaKhVL9aNILZsMirSBzCxKm5i0QuhNRrfD2KRIx9HZvjJJs3E+sq0hxYTJkrOTz4 1o9P5vR66LC5XiWxdqdnZxdDBKfzzaGQJ0GC6rAqQbieRaQ+kP5It8N7JyX3u28S 3w1hy48mMAlk8D9TySauMsARfreo8SdPLqjg9s3ZuMPlQwihbAC+BAJdNn2Ch1ug qGAsG+b3ODh0G84VkoHBm0TRktdjO+EtzgoGrqVK+shnu/CILRnlGKd98TuQ+QUo c3JBM3WIBc4529PZC5kDeb+8IF2xCktPCBJay0zJ1IyoTAECAwEAATANBgkqhkiG 9w0BAQsFAAOCAgEAyRRflOAqgPYI7mNTpQJXkBT3Bo+7dD4eG7SK97HmPXDXioVE nhYPNfMCGgyBAiUzK+45m/FoPIALmK4H+fL2zQ+JJin4879OIwmsphMWVs5bc78e rEksBnIKOT7x1gOx3WJfR5+ULgMuApXwAY7BDb27uesCu/lw/U9vqz8HMcY/++NX Omeob+3GTvM3yr/yIkg4ofarkYpw1MXB/BrCVrtZebiRQ7iVSoa9CJmTxFpUNO5k ixiSyzbWuUyqk5wY/IaWYF7zBjkBFWYmJk5axVLztDc/Cf78h+/E9cNqfgL/zD+Z lyOGb+oirAmn6tDdDHQ3ggrhl+TyX0sNVC9QAZezZn32qHIQ4LgLTt2rdd60K8z5 iBqc4TL+gLaBNy/7zbz25pB5kcjDax8TEOht6O9T6WK/U2at/OlSQ7Fwx4Y/v/OY pp2j3o4tUTZa+U4b7iHE36HG+YMZv70fg+/GbYzLJyo9VzlJZFfhDjPAnCyyj2mG EgiqPUbzyWnWJh4bveVdC6VG6dyEvt+oIy4nbno0NPKKgSA57jIRNlB9SytEAJjQ pJsZJytDZfjRBZNd14wEawUrSFwMQ2Ym4egeOo/CmpzFHS1NtSQR0etMrt7V1YJn jVeP7nOOcRNxM0CMHymKxo3trQ+4dGpnjzzc2JenDz7tmbdcRWBXJLy82es= -----END CERTIFICATE----- limits-4.4.1/tests/tls/redis.dh000066400000000000000000000006501476517132700164260ustar00rootroot00000000000000-----BEGIN DH PARAMETERS----- MIIBCAKCAQEAuJSCPPLQ4c+VDeKMy6Nwzw65pOzdPUWnZmifgIGr3O7Y+wNOWXNo ahi4vF1bBB840370ksGwb72PnS0v70vmun4+/VbBD8sCseDK7AQSFsvRjJeNtcuC RsyihDWsrN1Fgh9ZKGY4SxjtLuV0us7q9gBf9ZDfwIzbZdjPJRDWStUgVsEcx20q Nq59BjNDF1aZ9jw50mn04xThW5S7dN3SVydwRzfGTmV4T9jfCIDjJpr2XHRI0hL4 C+AkDOdRS55YCzdSzAchR7L7VJ6u55YsTsDodrTscAbRP49RBSAh6yD6A6bfWBZB 3KenFis7NUvwgVwVvSa623ZX7e6pciFgHwIBAg== -----END DH PARAMETERS----- limits-4.4.1/tests/tls/redis.key000066400000000000000000000032541476517132700166260ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD6W6HObqvp0oDQ GYEhiIoMQ4UsPIb8YGJlY1LZDymioVS/WjSC2bDIq0gcwsSpuYtELoTUa3w9ikSM fR2b4ySbNxPrKtIcWEyZKzk8+NaPT+b0euiwuV4lsXanZ2cXQwSn882hkCdBguqw KkG4nkWkPpD+SLfDeycl97tvEt8NYcuPJjAJZPA/U8kmrjLAEX63qPEnTy6o4PbN 2bjD5UMIoWwAvgQCXTZ9godboKhgLBvm9zg4dBvOFZKBwZtE0ZLXYzvhLc4KBq6l 
SvrIZ7vwiC0Z5RinffE7kPkFKHNyQTN1iAXOOdvT2QuZA3m/vCBdsQpLTwgSWstM ydSMqEwBAgMBAAECggEAA86F4NIs+gKx47gnjv94/eO+8CPBd6/tfxYqCdPhsa/b ToeMGds+hWaZU5qgi+Ye2HwOl/5EZtkl3q7+VsL26QdG3cIKbNTo4UFXfvZXqNkx rVD7HcjLxi3zHGuFlUR3GyipXUYIWsYnVGNoGWsKqwsqgDtR0/6Y40pnhrJWQulM kKFiMvhbpqG9DBXPm3rIc0ZOCZBF9gSdncBzhYrfH0Owz3yVfvgHgQ2u3XZnHhXN Pa2WCWlnwteV01+nQIGLwKu6R/9W/ZoIa1kEP6Ws9nOQZHNNs89mnSvOW2oRt3YA 3kyFyiZsKUl+dHmXCXbq/wBO65LmTWvD3eL22FtDkwKBgQD74+38jdsYbRM7MFYI OLJcXxRsCK72byoyF8PENvTv9NX4o8RjYs8Sc0XVHRMnxOmzygkE3Bu01wXX5UvN 5ZGw1w0w+C5eNwSMIZjPhPo+FZtfKTRcBO/cSjBGGK0hhzdA76zCMDpthKWa/NTF Ws6ZS1NBewfVXpa/D8LO9XAyvwKBgQD+cU1PhavZ4Q1iuFb16xOfsq+r0RdJPAGe uWh6ZYg88vWw+TJoF4VI4j2+qqNyWgcbj+/xmFidXTfj+4SrjIQF6VlEJA2yohAh Y5mcQcb+JMbS0Uxg5I0zwJZiM2eiMPzCylkfOLmaUFEtkUCvPgTqYk/um6j632vb hFxAuTrxPwKBgQC8bT/9GhzYg9fMAheJZOg/4zxCqKhBXt0gxdhDMp0911gs/E0P z715XtPRXUOUD2CHhs4CXak4OVV8sGFpb57xu515kV4WSm/TZ9qI+j+8K6ujnw40 +9h2YOUFurXgi+bQY7pHNY6vmHfNne6Ih6pe0BNCu4cbnPuFh4PCD6ZTvwKBgQCO cD8nVV1vC13C9g84e/DwZceSF+tBRCZkBMdIPa6W8Cw6dkaRnxkuQ/js0/nL+jog Y+OfC/oPni+7oQIURwVhidA/JS/F4JPHHUrQ/sG3ZD8ppaFsXIXvgtCtCLnrQiE9 qqlZoRfGG1BWggoEEGQQEhlNKjbeulRW/zHlycwFYQKBgQDHgt+9QNsn4gagqCuJ lo9soBVm2pcIOGDcKr0mebQwW1qQOdfNWJ3Zcl5QqxzzUWFgg4021i83NJ9junft XnuEazhYFNJJtPTTcOvJ5Y5Z8StUcYPmGG+HdmQ2NaOPJiLNTafm6MWUAFCErKPu q2G4amTnrAoT117o1U47aUYVxw== -----END PRIVATE KEY----- limits-4.4.1/tests/tls/server.crt000066400000000000000000000030261476517132700170230ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIEUDCCAjigAwIBAgIUEY0m2WL/gu1ybkBrye8J1iMQNEwwDQYJKoZIhvcNAQEL BQAwNTETMBEGA1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUg QXV0aG9yaXR5MB4XDTI0MTEyODIxMTQwN1oXDTI1MTEyODIxMTQwN1owKzETMBEG A1UECgwKUmVkaXMgVGVzdDEUMBIGA1UEAwwLU2VydmVyLW9ubHkwggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6iVdoklFNL6js2thYkFm0Q6ew7RFl+5aC 3mAVSrxmTccQOEOd0UM7gFOa0x3GgVqt8ob6SMF0OZiwimAltgszDXYIH1LGnrYB TCNqL8DEXHPeF7ykMeUxVkFORYqplXo3E8qBcETGupBkod6fjdRfDr3i0D3o7g4x ShDa3z2kqH1yeEBkw+7Ily9UHPG4M7L4bFUk6H89snuJMsI2vk8e7Ll4XFPOv3Gu Gx5i8PmxxgYL6K+lcpMFGvG8t7PU09jPAHI7/jcZ7Q+mNoI/O0IaGpdoOOnDd9gL GLx4kxIU8Eh3KrWYQKLQZpvnIYrfl/b32LLR9LzrFt/JaO50b8mxAgMBAAGjYjBg MAsGA1UdDwQEAwIFoDARBglghkgBhvhCAQEEBAMCBkAwHQYDVR0OBBYEFAgVbEPX o4IROc2wbivjVg1vokL2MB8GA1UdIwQYMBaAFAwEX75Bhlwn5xysMWNESWgpeA75 MA0GCSqGSIb3DQEBCwUAA4ICAQDAe9/ByS5o6bnyfnHxjRkOSLEZK7WGN4ev3yXQ wkFXKTDO5uNf5KFeYSKTXiLInoykHt4LY7lp5QXHqvO3fUBYZTql3LGMnwRTGImi tzKXWcrJsnmXkM9R6dvDv6d6xbm6xs3TuhU9/NH6+XfNps0f+YTT5P/it4fQJCVk xNWcrAzxIla/EeD4Es/frQ3nDfaFUgjJ4CdmCvciD2koZOmpmF16S6B2fYndFEPH Qt3KBHfsceTjvG4ja9xaowzKxNXFZDp6adaOlfdyJYPguYWgbEYTrqUJavvKVriT GgtmGeXek6RDKeuQkej/EKYOIY+ADHSnVywyALDz6HUpAK/fRvLF6UJ5Fwe3oMl+ /yJo1QrvIKSM+g71uWikguydTE6I6anlNVwl3fi7gje7uB1xYC5mLmGd0Zb3J/iB uM7i8e3xzf9gHmh/KH70XUqK7j9bfuAQHXOla6abjYSWz5pNWpx650+uEhA9+14o duzKB0NeTx2Jde3RO41q88vkiBdhRjhxqHwXErG5ABusUcohgZQdFSDXAbk75OnW RtdB6ZQ6eF1Ntozlkrh8Y2ueifKUJNF2ux1kX9IsXtkCGGLoW3M0GaxYtAutWnaO ua4MDEZhEzFWi/MOpgSSB6KDF8wUjO0xI9ewmQtnzTYi2naAD60J5NppVsqyZFX7 s4YY3g== -----END CERTIFICATE----- limits-4.4.1/tests/tls/server.key000066400000000000000000000032541476517132700170260ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC6iVdoklFNL6js 2thYkFm0Q6ew7RFl+5aC3mAVSrxmTccQOEOd0UM7gFOa0x3GgVqt8ob6SMF0OZiw imAltgszDXYIH1LGnrYBTCNqL8DEXHPeF7ykMeUxVkFORYqplXo3E8qBcETGupBk od6fjdRfDr3i0D3o7g4xShDa3z2kqH1yeEBkw+7Ily9UHPG4M7L4bFUk6H89snuJ MsI2vk8e7Ll4XFPOv3GuGx5i8PmxxgYL6K+lcpMFGvG8t7PU09jPAHI7/jcZ7Q+m 
NoI/O0IaGpdoOOnDd9gLGLx4kxIU8Eh3KrWYQKLQZpvnIYrfl/b32LLR9LzrFt/J aO50b8mxAgMBAAECggEAEGcxY4WyVopBXYqlCayv5rUoUtuV8xTbmGh7pIOs64Yd YiTAdWlIczwGng/Ah4sqcS9phSrOsCy8a5HPgKFwfK3LWk1+1u5v7M9IMfc/CUVj /vyxXvLwR5VhKsPcvKrjU2KPJTC9K4iOV/sP+5HaQf2m3wIW35vHbbgooUaOQiNU TYPPo8zIDioqA2ThUmb/vXsSUb48m3+++6dXs3wez0GTAV9+srOm0+B5acmIlEEF IisCrHg1fCFyfbwWhKPEvRhW38GLYmuSnjMU+DgZh6G2Np+1Du0T/NJse0ALGKNs nQrdCoK+oTMd0ZfJZiVhBTYP2wDljy2DIHoD/YlIXQKBgQDtADuWE3B4mezT4mhG 5MO4OS/k8lH6Q+oQHDhNptQKt+svPpy4MIZ9ZNJQzgRHxZcvt1FbVEVZNDJWFYFL dYwPsrKmF1TB0JXjzVf0xfRvM1XgBWv3AqDXd7xL3xwgYrG6Pn3y5+XYZG3i5ZA3 oyXNJr5/OdBrZylxk8Z3EeEaGwKBgQDJfXhzEylyDbHyAfJK0aV01wKFko7GSWoi 3jGcXw9Bzygsw/choN9J3Nar/NQRv7DL+M9wxRO646hNggzzjmYQaa6ugozM16sU 2NaX/PuV1YWwfFiq2fnr1ddt0L7viKSUgivB32gFo0mRkqpKT3fXhbc6V+tvJBuG uwaG3Q8oIwKBgQCL6hFDXiKN8EEyLX1mLPOmtIwj2zKuTp3F+QL8RLr1wPJlJ8+Z pXwgVv8amSVJwcqpbs3m3u855Gi7DWMRS0nuko6JI0j0StHSdI2ygZL2exX9dPXy E2klCxjsqcCPFfTagL4WQvUcNN3yj5f+YKCWZoMb1OPIJFUBKj6GWNXNdQKBgQDG NBrMeEkI3NfyzF9foiM0cF0WsDiqbYVuj0wWGpyTjAhlINB10B5WNlsmDCtA2mQ8 AaFtuS/TZCcnN4st+yIDezJOkDWZO34bn8JY+I8zsTs1hNxkFNB4YX7tk65/Stc7 D2Gj8PQOt7Wi8bd6KfollG/NgLckOUyqmEevWr2SKQKBgQCCeFYSTuCSP7SDG5Fw +JG8nVYOw7FMRx5X+PikCYyMS2AqgeQtHIF1JG0ItHlX2gXy4mvTREBRDJiFlETj m2OhMabM9y129MKptQcAgi+8+z50QeHd/nZ946ZizNqurVGmbNRWSU6Vqab3rxhm bsUgeorTujbF115oPGxbYNx13Q== -----END PRIVATE KEY----- limits-4.4.1/tests/utils.py000066400000000000000000000430561476517132700157120ustar00rootroot00000000000000from __future__ import annotations import asyncio import contextlib import functools import math import os import time import pytest from pytest_lazy_fixtures import lf from limits.limits import RateLimitItem ASYNC_REDIS_IMPLEMENTATION = os.environ.get("ASYNC_REDIS_IMPLEMENTATION", "coredis") def fixed_start(fn): @functools.wraps(fn) def __inner(*a, **k): start = time.time() while time.time() < math.ceil(start): time.sleep(0.01) return fn(*a, **k) return __inner def timestamp_based_key_ttl(item: RateLimitItem, now: float | None = None) -> float: """ Return the current timestamp-based key TTL. Used for some implementations of the sliding window counter that generate keys based on the timestamp. Args: item (RateLimitItem): the rate limit item now (Optional[float], optional): the current timestamp.
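(Worked example of the return value: with a 2-second expiry and now = 10.3, the function returns 2 - (10.3 % 2) = 1.7 seconds.)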
If None, generates the current timestamp """ if now is None: now = time.time() return item.get_expiry() - (now % item.get_expiry()) def async_fixed_start(fn): @functools.wraps(fn) async def __inner(*a, **k): start = time.time() while time.time() < math.ceil(start): time.sleep(0.01) return await fn(*a, **k) return __inner @contextlib.contextmanager def window(delay_end: float, delay: float | None = None): start = time.time() if delay is not None: while time.time() - start < delay: time.sleep(0.001) yield (start, start + delay_end) while time.time() - start < delay_end: time.sleep(0.001) @contextlib.asynccontextmanager async def async_window(delay_end: float, delay: float | None = None): start = time.time() if delay is not None: while time.time() - start < delay: await asyncio.sleep(0.001) yield (start, start + delay_end) while time.time() - start < delay_end: await asyncio.sleep(0.001) all_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param("memory://", {}, None, marks=pytest.mark.memory, id="in-memory"), pytest.param( "redis://localhost:7379", {}, lf("redis_basic"), marks=pytest.mark.redis, id="redis_basic", ), pytest.param( "memcached://localhost:22122", {}, lf("memcached"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached", ), pytest.param( "memcached://localhost:22122,localhost:22123", {}, lf("memcached_cluster"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached-cluster", ), pytest.param( "redis+cluster://localhost:7001/", {}, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "redis+cluster://:sekret@localhost:8400/", {}, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", }, lf("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "redis+sentinel://localhost:26379/mymaster", {"use_replicas": False}, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "mongodb://localhost:37017/", {}, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "etcd://localhost:2379", {}, lf("etcd"), marks=[pytest.mark.etcd, pytest.mark.flaky], id="etcd", ), pytest.param( "valkey://localhost:12379", {}, lf("valkey_basic"), marks=pytest.mark.valkey, id="valkey_basic", ), pytest.param( "valkey+cluster://localhost:2001/", {}, lf("valkey_cluster"), marks=pytest.mark.valkey_cluster, id="valkey-cluster", ), ], ) moving_window_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param("memory://", {}, None, marks=pytest.mark.memory, id="in-memory"), pytest.param( "redis://localhost:7379", {}, lf("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "redis+cluster://localhost:7001/", {}, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "redis+cluster://:sekret@localhost:8400/", {}, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", }, lf("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( 
"redis+sentinel://localhost:26379/mymaster", {"use_replicas": False}, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "mongodb://localhost:37017/", {}, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "valkey://localhost:12379", {}, lf("valkey_basic"), marks=pytest.mark.valkey, id="valkey_basic", ), pytest.param( "valkey+cluster://localhost:2001/", {}, lf("valkey_cluster"), marks=pytest.mark.valkey_cluster, id="valkey-cluster", ), ], ) sliding_window_counter_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param("memory://", {}, None, marks=pytest.mark.memory, id="in-memory"), pytest.param( "redis://localhost:7379", {}, lf("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "memcached://localhost:22122", {}, lf("memcached"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached", ), pytest.param( "memcached://localhost:22122,localhost:22123", {}, lf("memcached_cluster"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached-cluster", ), pytest.param( "redis+cluster://localhost:7001/", {}, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "redis+cluster://:sekret@localhost:8400/", {}, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", }, lf("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "redis+sentinel://localhost:26379/mymaster", {"use_replicas": False}, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "mongodb://localhost:37017/", {}, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "valkey://localhost:12379", {}, lf("valkey_basic"), marks=pytest.mark.valkey, id="valkey_basic", ), pytest.param( "valkey+cluster://localhost:2001/", {}, lf("valkey_cluster"), marks=pytest.mark.valkey_cluster, id="valkey-cluster", ), ], ) async_all_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param( "async+memory://", {}, None, marks=pytest.mark.memory, id="in-memory" ), pytest.param( "async+redis://localhost:7379", { "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "async+memcached://localhost:22122", {}, lf("memcached"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached", ), pytest.param( "async+memcached://localhost:22122,localhost:22123", {}, lf("memcached_cluster"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached-cluster", ), pytest.param( "async+redis+cluster://localhost:7001/", { "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "async+redis+cluster://:sekret@localhost:8400/", { "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "async+redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), 
pytest.param( "async+redis+sentinel://localhost:26379/mymaster", { "use_replicas": False, "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "async+mongodb://localhost:37017/", {}, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "async+etcd://localhost:2379", {}, lf("etcd"), marks=[pytest.mark.etcd, pytest.mark.flaky], id="etcd", ), pytest.param( "async+valkey://localhost:12379", {}, lf("valkey_basic"), marks=pytest.mark.valkey, id="valkey_basic", ), pytest.param( "async+valkey+cluster://localhost:2001/", {}, lf("valkey_cluster"), marks=pytest.mark.valkey_cluster, id="valkey-cluster", ), ], ) async_moving_window_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param( "async+memory://", {}, None, marks=pytest.mark.memory, id="in-memory" ), pytest.param( "async+redis://localhost:7379", { "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "async+redis+cluster://localhost:7001/", { "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "async+redis+cluster://:sekret@localhost:8400/", { "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "async+redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "async+redis+sentinel://localhost:26379/mymaster", { "use_replicas": False, "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "async+mongodb://localhost:37017/", {}, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "async+valkey://localhost:12379", {}, lf("valkey_basic"), marks=pytest.mark.valkey, id="valkey_basic", ), pytest.param( "async+valkey+cluster://localhost:2001/", {}, lf("valkey_cluster"), marks=pytest.mark.valkey_cluster, id="valkey-cluster", ), ], ) async_sliding_window_counter_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param( "async+memory://", {}, None, marks=pytest.mark.memory, id="in-memory" ), pytest.param( "async+redis://localhost:7379", {"implementation": ASYNC_REDIS_IMPLEMENTATION}, lf("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "async+memcached://localhost:22122", {}, lf("memcached"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached", ), pytest.param( "async+memcached://localhost:22122,localhost:22123", {}, lf("memcached_cluster"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached-cluster", ), pytest.param( "async+redis+cluster://localhost:7001/", {"implementation": ASYNC_REDIS_IMPLEMENTATION}, lf("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "async+redis+cluster://:sekret@localhost:8400/", {"implementation": ASYNC_REDIS_IMPLEMENTATION}, lf("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "async+redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": 
"./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "async+redis+sentinel://localhost:26379/mymaster", { "use_replicas": False, "implementation": ASYNC_REDIS_IMPLEMENTATION, }, lf("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "async+mongodb://localhost:37017/", {}, lf("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "async+valkey://localhost:12379", {}, lf("valkey_basic"), marks=pytest.mark.valkey, id="valkey_basic", ), pytest.param( "async+valkey+cluster://localhost:2001/", {}, lf("valkey_cluster"), marks=pytest.mark.valkey_cluster, id="valkey-cluster", ), ], ) limits-4.4.1/versioneer.py000066400000000000000000002512251476517132700156000ustar00rootroot00000000000000 # Version: 0.29 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/python-versioneer/python-versioneer * Brian Warner * License: Public Domain (Unlicense) * Compatible with: Python 3.7, 3.8, 3.9, 3.10, 3.11 and pypy3 * [![Latest Version][pypi-image]][pypi-url] * [![Build Status][travis-image]][travis-url] This is a tool for managing a recorded version number in setuptools-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install Versioneer provides two installation modes. The "classic" vendored mode installs a copy of versioneer into your repository. The experimental build-time dependency mode is intended to allow you to skip this step and simplify the process of upgrading. 
### Vendored mode * `pip install versioneer` to somewhere in your $PATH * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is available, so you can also use `conda install -c conda-forge versioneer` * add a `[tool.versioneer]` section to your `pyproject.toml` or a `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) * Note that you will need to add `tomli; python_version < "3.11"` to your build-time dependencies if you use `pyproject.toml` * run `versioneer install --vendor` in your source tree, commit the results * verify version information with `python setup.py version` ### Build-time dependency mode * `pip install versioneer` to somewhere in your $PATH * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is available, so you can also use `conda install -c conda-forge versioneer` * add a `[tool.versioneer]` section to your `pyproject.toml` or a `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) * add `versioneer` (with `[toml]` extra, if configuring in `pyproject.toml`) to the `requires` key of the `build-system` table in `pyproject.toml`: ```toml [build-system] requires = ["setuptools", "versioneer[toml]"] build-backend = "setuptools.build_meta" ``` * run `versioneer install --no-vendor` in your source tree, commit the results * verify version information with `python setup.py version` ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes). The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. 
`_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. 
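As an illustrative example (hypothetical values, consistent with the format just described): a `git describe` output of `0.11-2-g1076c97-dirty` would render under the default "pep440" style as `0.11+2.g1076c97.dirty`.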
For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/python-versioneer/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other languages) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out.
Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg` and `pyproject.toml`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install --[no-]vendor` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## Similar projects * [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time dependency * [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of versioneer * [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools plugin ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the "Unlicense", as described in https://unlicense.org/. [pypi-image]: https://img.shields.io/pypi/v/versioneer.svg [pypi-url]: https://pypi.python.org/pypi/versioneer/ [travis-image]: https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer """ # pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring # pylint:disable=missing-class-docstring,too-many-branches,too-many-statements # pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error # pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with # pylint:disable=attribute-defined-outside-init,too-many-arguments import configparser import errno import json import os import re import subprocess import sys from pathlib import Path from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union from typing import NoReturn import functools have_tomllib = True if sys.version_info >= (3, 11): import tomllib else: try: import tomli as tomllib except ImportError: have_tomllib = False class VersioneerConfig: """Container for Versioneer configuration parameters.""" VCS: str style: str tag_prefix: str versionfile_source: str versionfile_build: Optional[str] parentdir_prefix: Optional[str] verbose: Optional[bool] def get_root() -> str: """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . 
""" root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") if not ( os.path.exists(setup_py) or os.path.exists(pyproject_toml) or os.path.exists(versioneer_py) ): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") if not ( os.path.exists(setup_py) or os.path.exists(pyproject_toml) or os.path.exists(versioneer_py) ): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. my_path = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(my_path)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals(): print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(my_path), versioneer_py)) except NameError: pass return root def get_config_from_root(root: str) -> VersioneerConfig: """Read the project setup.cfg file to determine Versioneer config.""" # This might raise OSError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . root_pth = Path(root) pyproject_toml = root_pth / "pyproject.toml" setup_cfg = root_pth / "setup.cfg" section: Union[Dict[str, Any], configparser.SectionProxy, None] = None if pyproject_toml.exists() and have_tomllib: try: with open(pyproject_toml, 'rb') as fobj: pp = tomllib.load(fobj) section = pp['tool']['versioneer'] except (tomllib.TOMLDecodeError, KeyError) as e: print(f"Failed to load config from {pyproject_toml}: {e}") print("Try to load it from setup.cfg") if not section: parser = configparser.ConfigParser() with open(setup_cfg) as cfg_file: parser.read_file(cfg_file) parser.get("versioneer", "VCS") # raise error if missing section = parser["versioneer"] # `cast`` really shouldn't be used, but its simplest for the # common VersioneerConfig users at the moment. 
We verify against # `None` values elsewhere where it matters cfg = VersioneerConfig() cfg.VCS = section['VCS'] cfg.style = section.get("style", "") cfg.versionfile_source = cast(str, section.get("versionfile_source")) cfg.versionfile_build = section.get("versionfile_build") cfg.tag_prefix = cast(str, section.get("tag_prefix")) if cfg.tag_prefix in ("''", '""', None): cfg.tag_prefix = "" cfg.parentdir_prefix = section.get("parentdir_prefix") if isinstance(section, configparser.SectionProxy): # Make sure configparser translates to bool cfg.verbose = section.getboolean("verbose") else: cfg.verbose = section.get("verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" HANDLERS.setdefault(vcs, {})[method] = f return f return decorate def run_command( commands: List[str], args: List[str], cwd: Optional[str] = None, verbose: bool = False, hide_stderr: bool = False, env: Optional[Dict[str, str]] = None, ) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs: Dict[str, Any] = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs) break except OSError as e: if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode LONG_VERSION_PY['git'] = r''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. # Generated by versioneer-0.29 # https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" import errno import os import re import subprocess import sys from typing import Any, Callable, Dict, List, Optional, Tuple import functools def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" VCS: str style: str tag_prefix: str parentdir_prefix: str versionfile_source: str verbose: bool def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command( commands: List[str], args: List[str], cwd: Optional[str] = None, verbose: bool = False, hide_stderr: bool = False, env: Optional[Dict[str, str]] = None, ) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs: Dict[str, Any] = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs) break except OSError as e: if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir( parentdir_prefix: str, root: str, verbose: bool, ) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords: Dict[str, str] = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords( keywords: Dict[str, str], tag_prefix: str, verbose: bool, ) -> Dict[str, Any]: """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs( tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command ) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*" ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces: Dict[str, Any]) -> str: """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). 
""" vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces: Dict[str, Any]) -> str: """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%%d" %% (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
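    # Illustrative sketch (hypothetical values): in a 'git archive' tarball
    # the substitution keywords arrive pre-expanded, e.g.
    #     git_refnames = " (HEAD -> master, tag: v1.2.3)"
    #     git_full = "0123456789abcdef0123456789abcdef01234567"
    # whereas in a plain checkout they remain the literal "$Format:...$"
    # placeholders, which git_versions_from_keywords() below rejects.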
keywords: Dict[str, str] = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords( keywords: Dict[str, str], tag_prefix: str, verbose: bool, ) -> Dict[str, Any]: """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs( tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command ) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. 
This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*" ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
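            # Illustrative sketch (hypothetical values): a well-formed
            # output such as "v1.2.3-4-g5678abc" matches the regexp above,
            # giving closest-tag "1.2.3" (once tag_prefix "v" is stripped
            # below), distance 4 and short hash "5678abc"; only malformed
            # output falls through to this error path.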
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(versionfile_source: str, ipy: Optional[str]) -> None: """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [versionfile_source] if ipy: files.append(ipy) if "VERSIONEER_PEP518" not in globals(): try: my_path = __file__ if my_path.endswith((".pyc", ".pyo")): my_path = os.path.splitext(my_path)[0] + ".py" versioneer_file = os.path.relpath(my_path) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: with open(".gitattributes", "r") as fobj: for line in fobj: if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True break except OSError: pass if not present: with open(".gitattributes", "a+") as fobj: fobj.write(f"{versionfile_source} export-subst\n") files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir( parentdir_prefix: str, root: str, verbose: bool, ) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.29) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json

version_json = '''
%s
'''  # END VERSION_JSON


def get_versions():
    return json.loads(version_json)
"""


def versions_from_file(filename: str) -> Dict[str, Any]:
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as f:
            contents = f.read()
    except OSError:
        raise NotThisMethod("unable to read _version.py")
    mo = re.search(r"version_json = '''\n(.*)'''  # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        mo = re.search(r"version_json = '''\r\n(.*)'''  # END VERSION_JSON",
                       contents, re.M | re.S)
    if not mo:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(mo.group(1))


def write_to_version_file(filename: str, versions: Dict[str, Any]) -> None:
    """Write the given version number to the given _version.py file."""
    contents = json.dumps(versions, sort_keys=True,
                          indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % contents)

    print("set %s to '%s'" % (filename, versions["version"]))


def plus_or_dot(pieces: Dict[str, Any]) -> str:
    """Return a + if we don't already have one, else return a ."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"


def render_pep440(pieces: Dict[str, Any]) -> str:
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_branch(pieces: Dict[str, Any]) -> str:
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0"
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]:
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or None if no post-release segment is
    present).
    """
    vc = str.split(ver, ".post")
    return vc[0], int(vc[1] or 0) if len(vc) == 2 else None


def render_pep440_pre(pieces: Dict[str, Any]) -> str:
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags.
    0.post0.devDISTANCE
    """
    if pieces["closest-tag"]:
        if pieces["distance"]:
            # update the post release segment
            tag_version, post_version = pep440_split_post(pieces["closest-tag"])
            rendered = tag_version
            if post_version is not None:
                rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
            else:
                rendered += ".post0.dev%d" % (pieces["distance"])
        else:
            # no commits, use the tag as the version
            rendered = pieces["closest-tag"]
    else:
        # exception #1
        rendered = "0.post0.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered


def render_pep440_post_branch(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_old(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered


def render_git_describe(pieces: Dict[str, Any]) -> str:
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered


def render_git_describe_long(pieces: Dict[str, Any]) -> str:
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags.
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose: bool = False) -> Dict[str, Any]: """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or bool(cfg.verbose) # `bool()` used to avoid `None` assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
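    # Illustrative sketch (hypothetical values): each strategy below either
    # returns a complete version dict such as
    #     {"version": "1.2.3", "full-revisionid": "<40-hex sha>",
    #      "dirty": False, "error": None, "date": "<ISO-8601-ish timestamp>"}
    # or raises NotThisMethod, in which case the next source is tried:
    # expanded keywords, then _version.py, then the VCS, then parentdir.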
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}


def get_version() -> str:
    """Get the short version string for this project."""
    return get_versions()["version"]


def get_cmdclass(cmdclass: Optional[Dict[str, Any]] = None):
    """Get the custom setuptools subclasses used by Versioneer.

    If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to its pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/python-versioneer/python-versioneer/issues/52

    cmds = {} if cmdclass is None else cmdclass.copy()

    # we add "version" to setuptools
    from setuptools import Command

    class cmd_version(Command):
        description = "report generated version string"
        user_options: List[Tuple[str, str, str]] = []
        boolean_options: List[str] = []

        def initialize_options(self) -> None:
            pass

        def finalize_options(self) -> None:
            pass

        def run(self) -> None:
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?
# pip install -e . and setuptool/editable_wheel will invoke build_py # but the build_py command is not expected to copy any files. # we override different "build_py" commands for both environments if 'build_py' in cmds: _build_py: Any = cmds['build_py'] else: from setuptools.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) if getattr(self, "editable_mode", False): # During editable installs `.py` and data files are # not copied to build_lib return # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if 'build_ext' in cmds: _build_ext: Any = cmds['build_ext'] else: from setuptools.command.build_ext import build_ext as _build_ext class cmd_build_ext(_build_ext): def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_ext.run(self) if self.inplace: # build_ext --inplace will only build extensions in # build/lib<..> dir with no _version.py to write to. # As in place builds will already have a _version.py # in the module dir, we do not need to write one. return # now locate _version.py in the new build/ directory and replace # it with an updated value if not cfg.versionfile_build: return target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) if not os.path.exists(target_versionfile): print(f"Warning: {target_versionfile} does not exist, skipping " "version update. This can happen if you are running build_ext " "without first running build_py.") return print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_ext"] = cmd_build_ext if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # type: ignore # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? 
try: from py2exe.setuptools_buildexe import py2exe as _py2exe # type: ignore except ImportError: from py2exe.distutils_buildexe import py2exe as _py2exe # type: ignore class cmd_py2exe(_py2exe): def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # sdist farms its file list building out to egg_info if 'egg_info' in cmds: _egg_info: Any = cmds['egg_info'] else: from setuptools.command.egg_info import egg_info as _egg_info class cmd_egg_info(_egg_info): def find_sources(self) -> None: # egg_info.find_sources builds the manifest list and writes it # in one shot super().find_sources() # Modify the filelist and normalize it root = get_root() cfg = get_config_from_root(root) self.filelist.append('versioneer.py') if cfg.versionfile_source: # There are rare cases where versionfile_source might not be # included by default, so we must be explicit self.filelist.append(cfg.versionfile_source) self.filelist.sort() self.filelist.remove_duplicates() # The write method is hidden in the manifest_maker instance that # generated the filelist and was thrown away # We will instead replicate their final normalization (to unicode, # and POSIX-style paths) from setuptools import unicode_utils normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/') for f in self.filelist.files] manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt') with open(manifest_filename, 'w') as fobj: fobj.write('\n'.join(normalized)) cmds['egg_info'] = cmd_egg_info # we override different "sdist" commands for both environments if 'sdist' in cmds: _sdist: Any = cmds['sdist'] else: from setuptools.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self) -> None: versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir: str, files: List[str]) -> None: root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. 
""" SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ OLD_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ INIT_PY_SNIPPET = """ from . import {0} __version__ = {0}.get_versions()['version'] """ def do_setup() -> int: """Do main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (OSError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") maybe_ipy: Optional[str] = ipy if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except OSError: old = "" module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] snippet = INIT_PY_SNIPPET.format(module) if OLD_SNIPPET in old: print(" replacing boilerplate in %s" % ipy) with open(ipy, "w") as f: f.write(old.replace(OLD_SNIPPET, snippet)) elif snippet not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(snippet) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) maybe_ipy = None # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. do_vcs_install(cfg.versionfile_source, maybe_ipy) return 0 def scan_setup_py() -> int: """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors def setup_command() -> NoReturn: """Set up Versioneer and exit with appropriate error code.""" errors = do_setup() errors += scan_setup_py() sys.exit(1 if errors else 0) if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": setup_command()