pax_global_header: comment=ff28751a2326de0ad6a978e316397534acf29b81

limits-3.12.0/.coveragerc

[run]
omit =
    /**/limits/_version*
    /**/tests/*
    setup.py
    versioneer.py

[report]
exclude_lines =
    pragma: no cover
    noqa
    raise NotImplementedError
    @overload
    @abstractmethod
    if TYPE_CHECKING
    if typing.TYPE_CHECKING
    class .*\bProtocol\):

limits-3.12.0/.gitattributes

flask_ratelimits/_version.py export-subst
limits/_version.py export-subst

limits-3.12.0/.github/FUNDING.yml

github: alisaifee
open_collective: limits

limits-3.12.0/.github/ISSUE_TEMPLATE/bug-report.md

---
name: Bug Report
about: Submit a bug report
labels: 'bug'
---

## Expected Behaviour

## Current Behaviour

## Steps to Reproduce

1.
1.
1.
1.

## Your Environment

- limits version:
- Operating system:

limits-3.12.0/.github/ISSUE_TEMPLATE/feature.md

---
name: Feature or Enhancement
about: Propose a new feature or enhancement
labels: 'enhancement'
---

## Expected Behaviour

limits-3.12.0/.github/dependabot.yml

# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"

limits-3.12.0/.github/workflows/compatibility.yml

name: Compatibility
on:
  create:
    tags: ['*']
  push:
    branches: ['master']
  schedule:
    - cron: 0 23 * * *
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        service_version: ["ALL_LATEST=true"]
        marker: [not integration]
        include:
          - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=7.2"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=7.0"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=6.2.6"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SERVER_SSL_VERSION=6.0.16"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SERVER_VERSION=7.2"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SERVER_VERSION=7.0"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SERVER_VERSION=6.2.6"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SERVER_VERSION=6.0.16"
            marker: "redis"
          - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=7.2"
            marker: "redis_sentinel"
          - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=7.0"
            marker: "redis_sentinel"
          - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=6.2.6"
            marker: "redis_sentinel"
          - service_version: "LIMITS_REDIS_SENTINEL_SERVER_VERSION=6.0.16"
            marker: "redis_sentinel"
          - service_version: "LIMITS_MONGODB_SERVER_VERSION=7.0.5"
            marker: "mongodb"
          - service_version: "LIMITS_MONGODB_SERVER_VERSION=6.0.3"
            marker: "mongodb"
          - service_version: "LIMITS_MONGODB_SERVER_VERSION=5.0.3"
            marker: "mongodb"
          - service_version: "LIMITS_MONGODB_SERVER_VERSION=4.4.9"
            marker: "mongodb"
          - service_version: "LIMITS_MONGODB_SERVER_VERSION=4.2.17"
            marker: "mongodb"
          - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.6.15"
            marker: "memcached"
          - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.6.6"
            marker: "memcached"
          - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.5.16"
            marker: "memcached"
          - service_version: "LIMITS_MEMCACHED_SERVER_VERSION=1.4.34"
            marker: "memcached"
          - service_version: "LIMITS_ETCD_SERVER_VERSION=3.5"
            marker: "etcd"
          - service_version: "LIMITS_ETCD_SERVER_VERSION=3.4"
            marker: "etcd"
          - service_version: "LIMITS_ETCD_SERVER_VERSION=3.3"
            marker: "etcd"
    steps:
      - uses: actions/checkout@v4
      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install -U pip
          python -m pip install -U setuptools wheel
          pip install --no-binary protobuf -r requirements/ci.txt
      - name: Tests
        env:
          CI: "True"
        run: |
          eval "export ${{ matrix.service_version }}"
          py.test -m "${{ matrix.marker }} and not benchmark" --cov-report=xml --cov-branch --max-runs=3

limits-3.12.0/.github/workflows/main.yml

name: CI
on: [push, pull_request]
jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.8, 3.9, "3.10", "3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install -U pip
          python -m pip install -U setuptools wheel
          pip install --no-binary protobuf -r requirements/ci.txt
      - name: Lint with ruff
        run: |
          ruff check --select I
          ruff format --check limits tests
          ruff limits tests
      - name: Check types
        run: |
          mypy limits
  test:
    strategy:
      fail-fast: false
      matrix:
        python-version: [3.8]
        marker: [not (integration or benchmark)]
        os: [ubuntu-latest]
        include:
          - python-version: 3.9
            marker: 'not benchmark'
            os: ubuntu-latest
          - python-version: "3.10"
            marker: 'not benchmark'
            os: ubuntu-latest
          - python-version: "3.11"
            marker: 'not benchmark'
            os: ubuntu-latest
          - python-version: "3.12"
            marker: 'not benchmark'
            os: ubuntu-latest
    runs-on: "${{ matrix.os }}"
    steps:
      - uses: actions/checkout@v4
      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install -U pip
          python -m pip install -U setuptools wheel
          pip install --no-binary protobuf -r requirements/ci.txt
      - name: Tests
        env:
          CI: "True"
        run: |
          py.test -m "${{ matrix.marker }}" --cov-report=xml --cov-branch --max-runs=3
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4.2.0
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
  benchmark:
    needs: [test]
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.11"]
    steps:
      - uses: actions/checkout@v4
      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install "pip<22"
          python -m pip install --upgrade setuptools wheel
          pip install --no-binary protobuf -r requirements/ci.txt
      - name: Benchmark
        env:
          CI: "True"
        run: |
          py.test -W ignore -m "benchmark" --benchmark-min-rounds=1000 --benchmark-histogram=.benchmarks/benchmark
      - name: Upload benchmark results
        uses: actions/upload-artifact@v2
        with:
          name: benchmark
          path: .benchmarks/*
  build_wheels:
    needs: [lint]
    name: Build wheel
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build wheels
        run: |
          python -m pip install build
          python -m build --wheel
      - uses: actions/upload-artifact@v2
        with:
          path: ./dist/*.whl
  build_sdist:
    needs: [lint]
    name: Build source distribution
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build sdist
        run: |
          pipx run build --sdist
      - uses: actions/upload-artifact@v2
        with:
          path: dist/*.tar.gz
  upload_pypi:
    needs: [test, build_wheels, build_sdist]
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/master'
    steps:
      - uses: actions/download-artifact@v2
        with:
          name: artifact
          path: dist
      - uses: pypa/gh-action-pypi-publish@v1.4.2
        with:
          user: __token__
          password: ${{ secrets.TEST_PYPI_API_TOKEN }}
          repository_url: https://test.pypi.org/legacy/
          skip_existing: true
  upload_pypi_release:
    needs: [test, benchmark, build_wheels, build_sdist]
    runs-on: ubuntu-latest
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
    steps:
      - uses: actions/download-artifact@v2
        with:
          name: artifact
          path: dist
      - uses: pypa/gh-action-pypi-publish@v1.4.2
        with:
          user: __token__
          password: ${{ secrets.PYPI_API_TOKEN }}
  github_release:
    needs: [upload_pypi_release]
    name: Create Release
    runs-on: ubuntu-latest
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Download artifacts
        uses: actions/download-artifact@v2
        with:
          name: artifact
          path: dist
      - name: Download benchmark results
        uses: actions/download-artifact@v2
        with:
          name: benchmark
          path: .benchmarks
      - name: Generate release notes
        run: |
          ./scripts/github_release_notes.sh > release_notes.md
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "dist/*,.benchmarks/*"
          bodyFile: release_notes.md
          token: ${{ secrets.GITHUB_TOKEN }}

limits-3.12.0/.gitignore

*.pyc
*.log
cover/*
.mypy_cache/*
.coverage*
.test_env
.tool-versions
.idea
build/
dist/
htmlcov
*egg-info*
.python-version
.*.swp

limits-3.12.0/.readthedocs.yml

version: 2
build:
  os: ubuntu-20.04
  tools:
    python: "3.9"
    # You can also specify other tool versions:
    # nodejs: "16"
    # rust: "1.55"
    # golang: "1.17"

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: doc/source/conf.py

python:
  install:
    - requirements: requirements/docs.txt

limits-3.12.0/CLASSIFIERS

Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: MacOS
Operating System :: POSIX :: Linux
Operating System :: OS Independent
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.12
Programming Language :: Python :: Implementation :: PyPy

limits-3.12.0/CODE_OF_CONDUCT.md

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at .
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

limits-3.12.0/CONTRIBUTIONS.rst

Contributors
============

- `Timothee Groleau `_
- `Zehua Liu `_
- `David Czarnecki `_
- `Laurent Savaete `_

limits-3.12.0/HISTORY.rst

.. :changelog:

Changelog
=========

v3.12.0
-------
Release Date: 2024-05-12

* Enhancements

  * Lazily initialize pymongo client

* Documentation

  * Add django-ratelimiter in docs

* Chores

  * Update development dependencies
  * Update github actions to latest

v3.11.0
-------
Release Date: 2024-04-20

* Compatibility

  * Add support for python 3.12

v3.10.1
-------
Release Date: 2024-03-17

* Compatibility

  * Relax dependency constraint on packaging

v3.10.0
-------
Release Date: 2024-03-08

* Bug Fix

  * Fix incorrect mapping of coredis exceptions
  * Fix calculation of reset_time

v3.9.0
------
Release Date: 2024-02-17

* Bug Fix

  * Remove excessively low defaults for mongodb storage and instead
    delegate to the underlying dependency (pymongo, motor)

v3.8.0
------
Release Date: 2024-02-14

* Features

  * Add option to wrap storage errors with a ``StorageError`` exception

v3.7.0
------
Release Date: 2023-11-24

* Features

  * Ensure rate limit keys in redis use are prefixed with a `LIMITS` prefix.
    This allows for resetting all keys generated by the library without
    implicit knowledge of the key structure.

v3.6.0
------
Release Date: 2023-08-31

* Bug Fix

  * Remove default socket timeout from mongo storage
  * Ensure _version.py has stable content when generated using `git archive`
    from a tag regardless of when it is run.

* Compatibility

  * Remove references to python 3.7
  * Remove unnecessary setuptools dependency

v3.5.0
------
Release Date: 2023-05-16

* Bug Fix

  * Handle ``cost`` > 8000 when using redis
  * Remove arbitrary default timeout for redis+sentinel

v3.4.0
------
Release Date: 2023-04-17

* Bug Fix

  * Remove use of weakreferences to storages in strategy classes as this
    was not documented or required and led to usability issues.
* Chores

  * Update documentation dependencies
  * Remove unused gcra lua script

v3.3.1
------
Release Date: 2023-03-22

* Compatibility

  * Block incompatible versions of redis-py

* Chores

  * Force error on warnings in tests

v3.3.0
------
Release Date: 2023-03-20

* Compatibility

  * Remove deprecated use of `pkg_resources` and switch to `importlib_resource`

* Chores

  * Update documentation dependencies
  * Update github actions versions

v3.2.0
------
Release Date: 2023-01-24

* Bug Fix

  * Fix handling of authentication details in storage url of redis cluster

* Chores

  * Add test coverage for redis cluster with auth required

v3.1.6
------
Release Date: 2023-01-16

* Bug Fix

  * Disallow acquiring amounts > limit in moving window

* Usability

  * Use a named tuple for the response from `RateLimiter.get_window_stats`

v3.1.5
------
Release Date: 2023-01-12

* Performance

  * Reduce rpc calls to etcd for counter increment

* Compatibility

  * Relax version requirements for packaging dependency

* Chores

  * Improve benchmark outputs
  * Improve documentation for etcd

v3.1.4
------
Release Date: 2023-01-06

* Chores

  * Fix benchmark result artifact capture

v3.1.3
------
Release Date: 2023-01-06

* Chores

  * Fix benchmark result artifact capture

v3.1.2
------
Release Date: 2023-01-06

* Chores

  * Collapse benchmark & ci workflows

v3.1.1
------
Release Date: 2023-01-06

* Chores

  * Fix compatibility tests for etcd in CI
  * Improve visual identifiers of tests
  * Add benchmark tests in CI

v3.1.0
------
Release Date: 2023-01-05

* Compatibility

  * Increase minimum version of pymongo to 4.1

* Chores

  * Refactor storage tests
  * Improve test coverage across python versions in CI

v3.0.0
------
Release Date: 2023-01-04

* Features

  * Added etcd storage support for fixed window strategies

* Compatibility

  * Removed deprecated GAE Memcached storage
  * Updated minimum dependencies for mongodb
  * Updated dependency for async memcached on python 3.11

v2.8.0
------
Release Date: 2022-12-23

* Chores

  * Make rate limit items hashable
  * Update test certificates

v2.7.2
------
Release Date: 2022-12-11

* Compatibility Updates

  * Update documentation dependencies
  * Relax version constraint for packaging dependency
  * Bump CI to use python 3.11 final

v2.7.1
------
Release Date: 2022-10-20

* Compatibility Updates

  * Increase pymemcached dependency range to include 4.x
  * Add python 3.11 rc2 to CI

v2.7.0
------
Release Date: 2022-07-16

* Compatibility Updates

  * Update :pypi:`coredis` requirements to include 4.x versions
  * Remove CI / support for redis < 6.0
  * Remove python 3.7 from CI
  * Add redis 7.0 in CI

v2.6.3
------
Release Date: 2022-06-05

* Chores

  * Update development dependencies
  * Add CI for python 3.11
  * Increase test coverage for redis sentinel

v2.6.2
------
Release Date: 2022-05-12

* Compatibility Updates

  * Update :pypi:`motor` requirements to include 3.x version
  * Update async redis sentinel implementation to remove use of deprecated methods.
  * Fix compatibility issue with asyncio redis ``reset`` method in cluster mode
    when used with :pypi:`coredis` versions >= 3.5.0

v2.6.1
------
Release Date: 2022-04-25

* Bug Fix

  * Fix typing regression with strategy constructors `Issue 88 `_

v2.6.0
------
Release Date: 2022-04-25

* Deprecation

  * Removed tests for rediscluster using the :pypi:`redis-py-cluster` library

* Bug Fix

  * Fix incorrect ``__slots__`` declaration in :class:`limits.RateLimitItem`
    and its subclasses (`Issue #121 `__)

v2.5.4
------
Release Date: 2022-04-25

* Bug Fix

  * Fix typing regression with strategy constructors `Issue 88 `_

v2.5.3
------
Release Date: 2022-04-22

* Chore

  * Automate Github releases

v2.5.2
------
Release Date: 2022-04-17

* Chore

  * Increase strictness of type checking and annotations
  * Ensure installations from source distributions are PEP-561 compliant

v2.5.1
------
Release Date: 2022-04-15

* Chore

  * Ensure storage reset methods have consistent signature

v2.5.0
------
Release Date: 2022-04-13

* Feature

  * Add support for using redis cluster via the official redis client
  * Update coredis dependency to use 3.x

* Deprecations

  * Deprecate using redis-py-cluster

* Chores

  * Remove beta tags for async support
  * Update code base to remove legacy syntax
  * Tighten up CI test dependencies

v2.4.0
------
Release Date: 2022-03-10

* Feature

  * Allow passing an explicit connection pool to redis storage.
    Addresses `Issue 77 `_

v2.3.3
------
Release Date: 2022-02-03

* Feature

  * Add support for dns seed list when using mongodb

v2.3.2
------
Release Date: 2022-01-30

* Chores

  * Improve authentication tests for redis
  * Update documentation theme
  * Pin pip version for CI

v2.3.1
------
Release Date: 2022-01-21

* Bug fix

  * Fix backward incompatible change that separated sentinel
    and connection args for redis sentinel (introduced in 2.1.0).
    Addresses `Issue 97 `_

v2.3.0
------
Release Date: 2022-01-15

* Feature

  * Add support for custom cost per hit

* Bug fix

  * Fix installation issues with missing setuptools

v2.2.0
------
Release Date: 2022-01-05

* Feature

  * Enable async redis for python 3.10 via coredis

* Chore

  * Fix typing issue with strategy constructors

v2.1.1
------
Release Date: 2022-01-02

* Feature

  * Enable async memcache for python 3.10

* Bug fix

  * Ensure window expiry is reported in local time for mongodb
  * Fix inconsistent expiry for fixed window with memcached

* Chore

  * Improve strategy tests

v2.1.0
------
Release Date: 2021-12-22

* Feature

  * Add beta asyncio support
  * Add beta mongodb support
  * Add option to install with extras for different storages

* Bug fix

  * Fix custom option for cluster client in memcached
  * Fix separation of sentinel & connection args in :class:`limits.storage.RedisSentinelStorage`

* Deprecation

  * Deprecate GAEMemcached support
  * Remove use of unused `no_add` argument in :meth:`limits.storage.MovingWindowSupport.acquire_entry`

* Chore

  * Documentation theme upgrades
  * Code linting
  * Add compatibility CI workflow

v2.0.3
------
Release Date: 2021-11-28

* Chore

  * Ensure package is marked PEP-561 compliant

v2.0.1
------
Release Date: 2021-11-28

* Chore

  * Added type annotations

v2.0.0
------
Release Date: 2021-11-27

* Chore

  * Drop support for python < 3.7

v1.6
----
Release Date: 2021-11-27

* Chore

  * Final release for python < 3.7

v1.5.1
------
Release Date: 2020-02-25

* Bug fix

  * Remove duplicate call to ttl in RedisStorage
  * Initialize master/slave connections for RedisSentinel once

v1.5
----
Release Date: 2020-01-23

* Bug fix for handling TTL response from Redis when key doesn't exist
* Support Memcache over unix domain socket
* Support Memcache cluster
* Pass through constructor keyword arguments to underlying storage
  constructor(s)
* CI & test improvements

v1.4.1
------
Release Date: 2019-12-15

* Bug fix for implementation of clear in MemoryStorage
  not working with MovingWindow

v1.4
----
Release Date: 2019-12-14

* Expose API for clearing individual limits
* Support for redis over unix domain socket
* Support extra arguments to redis storage

v1.3
----
Release Date: 2018-01-28

* Remove pinging redis on initialization

v1.2.1
------
Release Date: 2017-01-02

* Fix regression with csv as multiple limits

v1.2.0
------
Release Date: 2016-09-21

* Support reset for RedisStorage
* Improved rate limit string parsing

v1.1.1
------
Release Date: 2016-03-14

* Support reset for MemoryStorage
* Support for `rediss://` storage scheme to connect to redis over ssl

v1.1
----
Release Date: 2015-12-20

* Redis Cluster support
* Authentication for Redis Sentinel
* Bug fix for locking failures with redis.

v1.0.9
------
Release Date: 2015-10-08

* Redis Sentinel storage support
* Drop support for python 2.6
* Documentation improvements

v1.0.7
------
Release Date: 2015-06-07

* No functional change

v1.0.6
------
Release Date: 2015-05-13

* Bug fixes for .test() logic

v1.0.5
------
Release Date: 2015-05-12

* Add support for testing a rate limit before hitting it.

v1.0.3
------
Release Date: 2015-03-20

* Add support for passing options to storage backend

v1.0.2
------
Release Date: 2015-01-10

* Improved documentation
* Improved usability of API. Renamed RateLimitItem subclasses.

v1.0.1
------
Release Date: 2015-01-08

* Example usage in docs.
v1.0.0
------
Release Date: 2015-01-08

* Initial import of common rate limiting code from `Flask-Limiter `_

limits-3.12.0/LICENSE.txt

Copyright (c) 2023 Ali-Akber Saifee

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

limits-3.12.0/MANIFEST.in

include README.rst
include LICENSE.txt
include HISTORY.rst
include CONTRIBUTIONS.rst
include CLASSIFIERS
include versioneer.py
recursive-include requirements *.txt
recursive-include requirements/storage *.txt
recursive-include doc/source *
recursive-include doc *.py Make*
include limits/_version.py
include limits/py.typed
recursive-include limits *.lua
recursive-include limits/resources *

limits-3.12.0/Makefile

lint:
	ruff check --select I
	ruff format --check limits tests
	ruff limits tests
	mypy limits

lint-fix:
	ruff check --select I --fix
	ruff format limits tests
	ruff --fix limits tests
	mypy limits

limits-3.12.0/README.rst

.. |ci| image:: https://github.com/alisaifee/limits/workflows/CI/badge.svg?branch=master
    :target: https://github.com/alisaifee/limits/actions?query=branch%3Amaster+workflow%3ACI
.. |codecov| image:: https://codecov.io/gh/alisaifee/limits/branch/master/graph/badge.svg
    :target: https://codecov.io/gh/alisaifee/limits
.. |pypi| image:: https://img.shields.io/pypi/v/limits.svg?style=flat-square
    :target: https://pypi.python.org/pypi/limits
.. |pypi-versions| image:: https://img.shields.io/pypi/pyversions/limits?style=flat-square
    :target: https://pypi.python.org/pypi/limits
.. |license| image:: https://img.shields.io/pypi/l/limits.svg?style=flat-square
    :target: https://pypi.python.org/pypi/limits
.. |docs| image:: https://readthedocs.org/projects/limits/badge/?version=latest
    :target: https://limits.readthedocs.org

limits
------
|docs| |ci| |codecov| |pypi| |pypi-versions| |license|

**limits** is a python library to perform rate limiting with commonly used
storage backends (Redis, Memcached, MongoDB & Etcd).

Supported Strategies
====================

`Fixed Window `_
    This strategy resets at a fixed interval (start of minute, hour, day etc).
    For example, given a rate limit of ``10/minute`` the strategy will:

    - Allow 10 requests between ``00:01:00`` and ``00:02:00``
    - Allow 10 requests at ``00:00:59`` and 10 more requests at ``00:01:00``

`Fixed Window (Elastic) `_
    Identical to Fixed window, except every breach of rate limit results
    in an extension to the time out. For example a rate limit of `1/minute`
    hit twice within a minute will result in a lock-out for two minutes.

`Moving Window `_
    Sliding window strategy enforces a rate limit of N/(m time units)
    on the **last m** time units at the second granularity.

    For example, with a rate limit of ``10/minute``:

    - Allow 9 requests that arrive at ``00:00:59``
    - Allow another request that arrives at ``00:01:00``
    - Reject the request that arrives at ``00:01:01``

Storage backends
================

- `Redis `_
- `Memcached `_
- `MongoDB `_
- `Etcd `_
- `In-Memory `_

Dive right in
=============

Initialize the storage backend

.. code-block:: python

   from limits import storage
   memory_storage = storage.MemoryStorage()
   # or memcached
   memcached_storage = storage.MemcachedStorage("memcached://localhost:11211")
   # or redis
   redis_storage = storage.RedisStorage("redis://localhost:6379")
   # or use the factory
   storage_uri = "memcached://localhost:11211"
   some_storage = storage.storage_from_string(storage_uri)

Initialize a rate limiter with the Moving Window Strategy

.. code-block:: python

   from limits import strategies
   moving_window = strategies.MovingWindowRateLimiter(memory_storage)

Initialize a rate limit

.. code-block:: python

   from limits import parse
   one_per_minute = parse("1/minute")

Initialize a rate limit explicitly

.. code-block:: python

   from limits import RateLimitItemPerSecond
   one_per_second = RateLimitItemPerSecond(1, 1)

Test the limits

.. code-block:: python

   import time

   assert True == moving_window.hit(one_per_minute, "test_namespace", "foo")
   assert False == moving_window.hit(one_per_minute, "test_namespace", "foo")
   assert True == moving_window.hit(one_per_minute, "test_namespace", "bar")

   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")
   assert False == moving_window.hit(one_per_second, "test_namespace", "foo")
   time.sleep(1)
   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")

Check specific limits without hitting them

.. code-block:: python

   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")
   while not moving_window.test(one_per_second, "test_namespace", "foo"):
       time.sleep(0.01)
   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")

Links
=====

* `Documentation `_
* `Changelog `_

limits-3.12.0/doc/Makefile

# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Flask-Ratelimit.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Flask-Ratelimit.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Flask-Ratelimit"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Flask-Ratelimit"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

limits-3.12.0/doc/source/_static/custom.css

.badges {
    display: flex;
    padding: 5px;
    flex-direction: row;
    justify-content: center;
}

.header-badge {
    padding: 2px;
}

limits-3.12.0/doc/source/api.rst

:tocdepth: 4

=============
API Reference
=============

.. autosummary::

   limits
   limits.strategies
   limits.storage
   limits.aio.strategies
   limits.aio.storage

.. currentmodule:: limits

Strategies
==========

Synchronous Strategies
----------------------

The available built in rate limiting strategies which expect
a single parameter: a subclass of :class:`~limits.storage.Storage`.

.. currentmodule:: limits.strategies

Provided by :mod:`limits.strategies`

.. autoclass:: FixedWindowRateLimiter
.. autoclass:: FixedWindowElasticExpiryRateLimiter
.. autoclass:: MovingWindowRateLimiter

All strategies implement the same abstract base class:

.. autoclass:: RateLimiter

Async Strategies
----------------

These variants should be used for asyncio support. These strategies
expose async variants and expect a subclass of :class:`limits.aio.storage.Storage`

.. currentmodule:: limits.aio.strategies

Provided by :mod:`limits.aio.strategies`

.. autoclass:: FixedWindowRateLimiter
.. autoclass:: FixedWindowElasticExpiryRateLimiter
.. autoclass:: MovingWindowRateLimiter

All strategies implement the same abstract base class:

.. autoclass:: RateLimiter

Storage
=======

Storage Factory function
------------------------

Provided by :mod:`limits.storage`

.. autofunction:: limits.storage.storage_from_string

Synchronous Storage
-------------------

Provided by :mod:`limits.storage`

.. currentmodule:: limits.storage

In-Memory Storage
^^^^^^^^^^^^^^^^^

.. autoclass:: MemoryStorage

Redis Storage
^^^^^^^^^^^^^

.. autoclass:: RedisStorage

Redis Cluster Storage
^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: RedisClusterStorage

Redis Sentinel Storage
^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: RedisSentinelStorage

Memcached Storage
^^^^^^^^^^^^^^^^^

.. autoclass:: MemcachedStorage

MongoDB Storage
^^^^^^^^^^^^^^^

.. autoclass:: MongoDBStorage

Etcd Storage
^^^^^^^^^^^^

.. autoclass:: EtcdStorage

Async Storage
-------------

Provided by :mod:`limits.aio.storage`

.. currentmodule:: limits.aio.storage

Async In-Memory Storage
^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: MemoryStorage

Async Redis Storage
^^^^^^^^^^^^^^^^^^^

.. autoclass:: RedisStorage

Async Redis Cluster Storage
^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: RedisClusterStorage

Async Redis Sentinel Storage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: RedisSentinelStorage

Async Memcached Storage
^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: MemcachedStorage

Async MongoDB Storage
^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: MongoDBStorage

Async Etcd Storage
^^^^^^^^^^^^^^^^^^

.. autoclass:: EtcdStorage

Abstract storage classes
------------------------

.. autoclass:: limits.storage.Storage
.. autoclass:: limits.storage.MovingWindowSupport

Async variants
^^^^^^^^^^^^^^

.. autoclass:: limits.aio.storage.Storage
.. autoclass:: limits.aio.storage.MovingWindowSupport

Rate Limits
===========

.. currentmodule:: limits

Provided by :mod:`limits`

Parsing functions
-----------------

.. autofunction:: parse
.. autofunction:: parse_many

Rate limit granularities
------------------------

All rate limit items implement :class:`RateLimitItem` by declaring a :attr:`GRANULARITY`

.. autoclass:: RateLimitItem

------

.. autoclass:: RateLimitItemPerSecond
.. autoclass:: RateLimitItemPerMinute
.. autoclass:: RateLimitItemPerHour
.. autoclass:: RateLimitItemPerDay
.. autoclass:: RateLimitItemPerMonth
.. autoclass:: RateLimitItemPerYear

Structures
==========

.. autoclass:: limits.WindowStats
   :no-inherited-members:

Exceptions
==========

.. autoexception:: limits.errors.ConfigurationError
   :no-inherited-members:
.. autoexception:: limits.errors.ConcurrentUpdateError
   :no-inherited-members:
.. autoexception:: limits.errors.StorageError
   :no-inherited-members:
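Quick example
=============

A minimal sketch of how the pieces documented above fit together - the
namespace and identifier strings (``"my_namespace"``, ``"my_key"``) are
arbitrary examples, not part of the API:

.. code:: python

   from limits import parse
   from limits.storage import MemoryStorage
   from limits.strategies import MovingWindowRateLimiter

   limiter = MovingWindowRateLimiter(MemoryStorage())
   two_per_minute = parse("2/minute")

   # consume one hit, then inspect the current window
   assert limiter.hit(two_per_minute, "my_namespace", "my_key")
   stats = limiter.get_window_stats(two_per_minute, "my_namespace", "my_key")
   # WindowStats is a named tuple of (reset_time, remaining)
   print(stats.remaining, stats.reset_time)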
limits-3.12.0/doc/source/async.rst

=============
Async Support
=============

.. versionadded:: 2.1

A new namespace ``limits.aio`` is available which mirrors the original
``limits.storage`` and ``limits.strategies`` packages.

The following async storage backends are implemented:

- In-Memory
- Redis (via `coredis `_)
- Memcached (via `emcache `_)
- MongoDB (via `motor `_)
- Etcd (via `aetcd `_)

Quick start
===========

This example demonstrates the subtle differences in the ``limits.aio`` namespace:

.. code::

   from limits import parse
   from limits.storage import storage_from_string
   from limits.aio.strategies import MovingWindowRateLimiter

   redis = storage_from_string("async+redis://localhost:6379")

   moving_window = MovingWindowRateLimiter(redis)
   one_per_minute = parse("1/minute")

   async def hit():
       return await moving_window.hit(one_per_minute, "test_namespace", "foo")

Refer to :ref:`api:async storage` for more implementation details of the async
storage backends, and :ref:`api:async strategies` for the async rate limit
strategies API.

limits-3.12.0/doc/source/changelog.rst

.. include:: ../../HISTORY.rst

limits-3.12.0/doc/source/conf.py

#
import os
import sys

sys.path.insert(0, os.path.abspath("../../"))
sys.path.insert(0, os.path.abspath("./"))

from theme_config import *

import limits

project = "limits"
description = "limits is a python library to perform rate limiting with commonly used storage backends"
copyright = "2023, Ali-Akber Saifee"

if ".post0.dev" in limits.__version__:
    version, ahead = limits.__version__.split(".post0.dev")
else:
    version = limits.__version__

release = version

html_static_path = ["./_static"]
html_css_files = [
    "custom.css",
    "https://fonts.googleapis.com/css2?family=Fira+Code:wght@300;400;700&family=Fira+Sans:ital,wght@0,100;0,200;0,300;0,400;0,500;0,600;0,700;0,800;0,900;1,100;1,200;1,300;1,400;1,500;1,600;1,700;1,800;1,900&display=swap",
]
html_title = f"{project} {{{release}}}"

try:
    ahead = int(ahead)

    if ahead > 0:
        html_theme_options[
            "announcement"
        ] = f"""
        This is a development version. The documentation for the latest version: {release} can be found here
        """
        html_title = f"{project} {{dev}}"
except:
    pass

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.autosummary",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "sphinxext.opengraph",
    "sphinxcontrib.programoutput",
    "sphinx_copybutton",
    "sphinx_inline_tabs",
    "sphinx_paramlinks",
]

autodoc_default_options = {
    "members": True,
    "inherited-members": True,
    "inherit-docstrings": True,
    "member-order": "bysource",
}

add_module_names = False
autoclass_content = "both"
autodoc_typehints_format = "short"
autosectionlabel_maxdepth = 3
autosectionlabel_prefix_document = True

extlinks = {"pypi": ("https://pypi.org/project/%s", "%s")}

intersphinx_mapping = {
    "python": ("http://docs.python.org/", None),
    "coredis": ("https://coredis.readthedocs.io/en/latest/", None),
    "emcache": ("https://emcache.readthedocs.io/en/latest/", None),
    "motor": ("https://motor.readthedocs.io/en/stable/", None),
    "redis-py-cluster": ("https://redis-py-cluster.readthedocs.io/en/latest/", None),
    "redis-py": ("https://redis-py.readthedocs.io/en/latest/", None),
    "pymemcache": ("https://pymemcache.readthedocs.io/en/latest/", None),
    "pymongo": ("https://pymongo.readthedocs.io/en/stable/", None),
    "python-etcd3": ("https://python-etcd3.readthedocs.io/en/latest/", None),
    "aetcd": ("https://aetcd.readthedocs.io/en/latest/", None),
}

limits-3.12.0/doc/source/custom-storage.rst
.. currentmodule:: limits

=======================
Custom storage backends
=======================

The **limits** package ships with a few storage implementations which allow you
to get started with some common data stores (redis & memcached) used for rate limiting.

To accommodate customizations to either the default storage backends or
different storage backends altogether, **limits** uses a registry pattern that
makes it painless to add your own custom storage (without having to submit
patches to the package itself).

Creating a custom backend requires:

#. Subclassing :class:`limits.storage.Storage` or :class:`limits.aio.storage.Storage`
#. Providing implementations for the abstractmethods of :class:`~limits.storage.Storage`
#. If the storage can support the :ref:`strategies:moving window` strategy - additionally
   implementing the methods from :class:`~limits.storage.MovingWindowSupport`
#. Providing naming *schemes* that can be used to lookup the custom storage in the
   storage registry. (Refer to :ref:`storage:storage scheme` for more details)

Example
=======

The following example shows two backend stores: one which doesn't implement the
:ref:`strategies:moving window` strategy and one that does. Do note the
:code:`STORAGE_SCHEME` class variables which result in the classes getting
registered with the **limits** storage registry::

    import time
    from urllib.parse import urlparse

    from limits.storage import MovingWindowSupport
    from limits.storage import Storage


    class AwesomeStorage(Storage):
        STORAGE_SCHEME = ["awesomedb"]

        def __init__(self, uri, **options):
            self.awesomesness = options.get("awesomeness", None)
            self.host = urlparse(uri).netloc
            self.port = urlparse(uri).port

        def check(self) -> bool:
            return True

        def get_expiry(self, key: str) -> int:
            return int(time.time())

        def incr(self, key: str, expiry: int, elastic_expiry=False) -> int:
            return 0

        def get(self, key):
            return 0


    class AwesomerStorage(Storage, MovingWindowSupport):
        STORAGE_SCHEME = ["awesomerdb"]

        def __init__(self, uri, **options):
            self.awesomesness = options.get("awesomeness", None)
            self.host = urlparse(uri).netloc
            self.port = urlparse(uri).port

        def check(self):
            return True

        def get_expiry(self, key):
            return int(time.time())

        def incr(self, key, expiry, elastic_expiry=False):
            return 0

        def get(self, key):
            return 0

        def acquire_entry(self, key, limit, expiry):
            return True

        def get_moving_window(self, key, limit, expiry):
            return [0, 10]

Once the above implementations are declared you can look them up using the
:ref:`api:storage factory function` in the following manner::

    from limits.storage import storage_from_string

    awesome = storage_from_string("awesomedb://localhoax:42", awesomeness=0)
    awesomer = storage_from_string("awesomerdb://localhoax:42", awesomeness=1)
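The registered classes behave like any other storage and can be plugged into a
rate limiting strategy. A minimal sketch, reusing the hypothetical
``awesomerdb`` scheme registered above::

    from limits import parse
    from limits.storage import storage_from_string
    from limits.strategies import MovingWindowRateLimiter

    # "awesomerdb" resolves to AwesomerStorage via the storage registry
    storage = storage_from_string("awesomerdb://localhoax:42", awesomeness=1)
    limiter = MovingWindowRateLimiter(storage)

    ten_per_minute = parse("10/minute")
    assert limiter.hit(ten_per_minute, "test_namespace", "foo")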
limits-3.12.0/doc/source/index.rst

========
*limits*
========

.. container:: badges

   .. image:: https://img.shields.io/github/last-commit/alisaifee/limits?logo=github&style=for-the-badge&labelColor=#282828
      :target: https://github.com/alisaifee/limits
      :class: header-badge

   .. image:: https://img.shields.io/github/actions/workflow/status/alisaifee/limits/main.yml?logo=github&style=for-the-badge&labelColor=#282828
      :target: https://github.com/alisaifee/limits/actions/workflows/main.yml

   .. image:: https://img.shields.io/codecov/c/github/alisaifee/limits?logo=codecov&style=for-the-badge&labelColor=#282828
      :target: https://app.codecov.io/gh/alisaifee/limits
      :class: header-badge

   .. image:: https://img.shields.io/pypi/pyversions/limits?style=for-the-badge&logo=pypi
      :target: https://pypi.org/project/limits
      :class: header-badge

----

*limits* is a python library to perform rate limiting with commonly used
storage backends (Redis, Memcached & MongoDB).

Get started by taking a look at :ref:`installation:installation`
and :ref:`quickstart:quickstart`.

To learn more about the different strategies refer to the
:ref:`strategies:rate limiting strategies` section.

For an overview of supported backends refer to :ref:`storage:storage backends`.

.. toctree::
   :maxdepth: 3
   :hidden:

   installation
   quickstart
   strategies
   storage
   async
   api
   custom-storage
   changelog

----

Development
===========

The source is available on `Github `_

To get started

.. code:: console

   $ git clone git://github.com/alisaifee/limits.git
   $ cd limits
   $ pip install -r requirements/dev.txt

Since `limits` integrates with various backend storages, local development and
running tests requires a working `docker & docker-compose installation `_.

Running the tests will start the relevant containers automatically - but will
leave them running so as to not incur the overhead of starting up on each test
run. To run the tests:

.. code:: console

   $ pytest

Once you're done - you will probably want to clean up the docker containers:

.. code:: console

   $ docker-compose down

Projects using *limits*
=======================

- `Flask-Limiter `_ : Rate limiting extension for Flask applications.
- `djlimiter `_: Rate limiting middleware for Django applications.
- `sanic-limiter `_: Rate limiting middleware for Sanic applications.
- `Falcon-Limiter `_ : Rate limiting extension for Falcon applications.
- `django-ratelimiter `_: Rate limiting decorator and middleware for Django applications.

References
==========

- `Redis rate limiting pattern #2 `_
- `DomainTools redis rate limiter `_

.. include:: ../../CONTRIBUTIONS.rst

limits-3.12.0/doc/source/installation.rst

============
Installation
============

Install the package with pip:

.. code:: console

   $ pip install limits

.. tab:: Redis

   .. code:: console

      $ pip install limits[redis]

   Includes

   .. literalinclude:: ../../requirements/storage/redis.txt

.. tab:: RedisCluster

   .. code:: console

      $ pip install limits[rediscluster]

   Includes

   .. literalinclude:: ../../requirements/storage/rediscluster.txt

.. tab:: Memcached

   .. code:: console

      $ pip install limits[memcached]

   Includes

   .. literalinclude:: ../../requirements/storage/memcached.txt

.. tab:: MongoDB

   .. code:: console

      $ pip install limits[mongodb]

   Includes:

   .. literalinclude:: ../../requirements/storage/mongodb.txt

.. tab:: Etcd

   .. code:: console

      $ pip install limits[etcd]

   Includes:

   .. literalinclude:: ../../requirements/storage/etcd.txt

More details around the specifics of each storage backend can be found in :ref:`storage`

Async Storage
=============

If you are using an async code base you can install the storage dependencies
along with the package using the following extras:

.. tab:: Redis

   .. code:: console

      $ pip install limits[async-redis]

   Includes:

   .. literalinclude:: ../../requirements/storage/async-redis.txt

.. tab:: Memcached

   .. code:: console

      $ pip install limits[async-memcached]

   Includes:

   .. literalinclude:: ../../requirements/storage/async-memcached.txt

.. tab:: MongoDB

   .. code:: console

      $ pip install limits[async-mongodb]

   Includes:

   .. literalinclude:: ../../requirements/storage/async-mongodb.txt

.. tab:: Etcd
   .. code:: console

      $ pip install limits[async-etcd]

   Includes:

   .. literalinclude:: ../../requirements/storage/async-etcd.txt

limits-3.12.0/doc/source/quickstart.rst

==========
Quickstart
==========

Initialize the strategy & storage
=================================

Initialize the storage backend
------------------------------

.. tab:: In Memory

   .. code::

      from limits import storage
      memory_storage = storage.MemoryStorage()

.. tab:: Memcached

   .. code::

      from limits import storage
      memory_storage = storage.MemcachedStorage("memcached://localhost:11211")

.. tab:: Redis

   .. code::

      from limits import storage
      memory_storage = storage.RedisStorage("redis://localhost:6379/1")

Initialize a rate limiter with the :ref:`Moving Window` Strategy
----------------------------------------------------------------

.. code::

   from limits import strategies
   moving_window = strategies.MovingWindowRateLimiter(memory_storage)

Describe the rate limit
=======================

Initialize a rate limit using the :ref:`string notation`
---------------------------------------------------------

.. code::

   from limits import parse
   one_per_minute = parse("1/minute")

Initialize a rate limit explicitly using a subclass of :class:`~limits.RateLimitItem`
--------------------------------------------------------------------------------------

.. code::

   from limits import RateLimitItemPerSecond
   one_per_second = RateLimitItemPerSecond(1, 1)

Test the limits
===============

Consume the limits
------------------

.. code::

   import time

   assert True == moving_window.hit(one_per_minute, "test_namespace", "foo")
   assert False == moving_window.hit(one_per_minute, "test_namespace", "foo")
   assert True == moving_window.hit(one_per_minute, "test_namespace", "bar")

   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")
   assert False == moving_window.hit(one_per_second, "test_namespace", "foo")
   time.sleep(1)
   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")

Check without consuming
-----------------------

.. code::

   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")
   while not moving_window.test(one_per_second, "test_namespace", "foo"):
       time.sleep(0.01)
   assert True == moving_window.hit(one_per_second, "test_namespace", "foo")

Clear a limit
=============

.. code::

   assert True == moving_window.hit(one_per_minute, "test_namespace", "foo")
   assert False == moving_window.hit(one_per_minute, "test_namespace", "foo")
   moving_window.clear(one_per_minute, "test_namespace", "foo")
   assert True == moving_window.hit(one_per_minute, "test_namespace", "foo")

.. _ratelimit-string:

==========================
Rate limit string notation
==========================

Instead of manually constructing instances of :class:`~limits.RateLimitItem`
you can instead use the following :ref:`api:parsing functions`.

- :func:`~limits.parse`
- :func:`~limits.parse_many`

These functions accept rate limits specified as strings following the format::

    [count] [per|/] [n (optional)] [second|minute|hour|day|month|year]

You can combine rate limits by separating them with a delimiter of your choice.

Examples
========

* ``10 per hour``
* ``10/hour``
* ``10/hour;100/day;2000 per year``
* ``100/day, 500/7days``
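A quick sketch of parsing one of the combined strings above with
:func:`~limits.parse_many` - the namespace and identifier are arbitrary
examples::

    from limits import parse_many
    from limits.storage import MemoryStorage
    from limits.strategies import FixedWindowRateLimiter

    limiter = FixedWindowRateLimiter(MemoryStorage())

    # parse_many returns one RateLimitItem per limit in the string
    for item in parse_many("10/hour;100/day;2000 per year"):
        assert limiter.hit(item, "test_namespace", "foo")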
_storage: ================ Storage Backends ================ Supported versions ================== .. image:: https://img.shields.io/github/actions/workflow/status/alisaifee/limits/compatibility.yml?logo=github&style=for-the-badge&labelColor=#282828 :alt: GitHub Workflow Status :target: https://github.com/alisaifee/limits/actions/workflows/compatibility.yml ----- **limits** is tested and known to work with the following versions of the dependency libraries and the associated storage versions. The CI tests against these versions on a nightly basis and you can see the results in `GitHub `_. .. tab:: Redis Dependency versions: .. literalinclude:: ../../requirements/storage/redis.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-redis.txt `Redis `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SERVER_VERSION=[\d\.]+' | cut -d = -f 2" Redis with SSL .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SERVER_SSL_VERSION=[\d\.]+' | cut -d = -f 2" `Redis Sentinel `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SENTINEL_SERVER_VERSION=[\d\.]+' | cut -d = -f 2" .. tab:: Redis Cluster Dependency versions: .. literalinclude:: ../../requirements/storage/rediscluster.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-redis.txt `Redis cluster `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_REDIS_SERVER_VERSION=[\d\.]+' | cut -d = -f 2" .. tab:: Memcached Dependency versions: .. literalinclude:: ../../requirements/storage/memcached.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-memcached.txt `Memcached `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_MEMCACHED_SERVER_VERSION=[\d\.]+' | cut -d = -f 2" .. tab:: MongoDB Dependency versions: .. literalinclude:: ../../requirements/storage/mongodb.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-mongodb.txt `MongoDB `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_MONGODB_SERVER_VERSION=[\d\.]+' | cut -d = -f 2" .. tab:: Etcd Dependency versions: .. literalinclude:: ../../requirements/storage/etcd.txt Dependency versions (async): .. literalinclude:: ../../requirements/storage/async-etcd.txt `Etcd `_ .. program-output:: bash -c "cat ../../.github/workflows/compatibility.yml | grep -o -P 'LIMITS_ETCD_SERVER_VERSION=[\d\.]+' | cut -d = -f 2" Storage scheme ============== **limits** uses a URL-style storage scheme notation (similar to the JDBC driver connection string notation) for configuring and initializing storage backends. This notation additionally provides a simple mechanism to both identify and configure the backend implementation based on a single string argument. The storage scheme follows the format :code:`{scheme}://{parameters}`. :func:`limits.storage.storage_from_string` is provided to look up and construct an instance of a storage based on the storage scheme. For example:: import limits.storage uri = "redis://localhost:9999" options = {} redis_storage = limits.storage.storage_from_string(uri, **options) The additional `options` keyword arguments are passed as-is to the constructor of the storage and handled differently by each implementation.
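As an illustration, any keyword argument accepted by the constructor of the resolved backend can be forwarded this way (a minimal sketch - the ``socket_timeout`` option shown here belongs to the underlying :pypi:`redis` client, not to **limits** itself)::

    import limits.storage

    # socket_timeout is forwarded untouched to the constructor of the
    # redis storage implementation (and from there to the redis client).
    redis_storage = limits.storage.storage_from_string(
        "redis://localhost:6379/0", socket_timeout=1
    )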
Please refer to the API documentation in the :ref:`api:storage` section for details. Examples ======== In-Memory Storage ----------------- The in-memory storage (:class:`~limits.storage.MemoryStorage`) takes no parameters so the only relevant value is :code:`memory://`. Memcached Storage ----------------- Requires the location of the memcached server(s). As such the parameter is a comma-separated list of :code:`{host}:{port}` locations such as :code:`memcached://localhost:11211` or :code:`memcached://localhost:11211,localhost:11212,192.168.1.1:11211` etc., or a path to a unix domain socket such as :code:`memcached:///var/tmp/path/to/sock` Depends on: :pypi:`pymemcache` Redis Storage ------------- Requires the location of the redis server and optionally the database number. :code:`redis://localhost:6379` or :code:`redis://localhost:6379/n` (for database `n`). If the redis server is listening over a unix domain socket you can use :code:`redis+unix:///path/to/sock` or :code:`redis+unix:///path/to/socket?db=n` (for database `n`). If the database is password-protected the password can be provided in the url, for example :code:`redis://:foobared@localhost:6379` or :code:`redis+unix://:foobared/path/to/socket` if using a unix domain socket. For scenarios where a redis connection pool is already available and can be reused, it can be provided in :paramref:`~limits.storage.storage_from_string.options`, for example:: pool = redis.connections.BlockingConnectionPool.from_url("redis://.....") storage_from_string("redis://", connection_pool=pool) Depends on: :pypi:`redis` Redis+SSL Storage ----------------- The official Redis client :pypi:`redis` supports redis connections over SSL with the scheme :code:`rediss://`. You can add SSL-related parameters in the url itself, for example: :code:`rediss://localhost:6379/0?ssl_ca_certs=./tls/ca.crt&ssl_keyfile=./tls/client.key`. Depends on: :pypi:`redis` Redis+Sentinel Storage ---------------------- Requires the location(s) of the redis sentinel instances and the `service-name` that is monitored by the sentinels. :code:`redis+sentinel://localhost:26379/my-redis-service` or :code:`redis+sentinel://localhost:26379,localhost:26380/my-redis-service`. If the sentinel is password-protected the username and/or password can be provided in the url, for example :code:`redis+sentinel://:sekret@localhost:26379/my-redis-service` When authentication details are provided in the url they will be used both for the sentinel and as connection arguments for the underlying redis nodes managed by the sentinel. If you need fine-grained control it is recommended to use the additional :paramref:`~limits.storage.storage_from_string.options` arguments. More details can be found in the API documentation for :class:`~limits.storage.RedisSentinelStorage` (or the async version: :class:`~limits.aio.storage.RedisSentinelStorage`). Depends on: :pypi:`redis` Redis Cluster Storage --------------------- Requires the location(s) of the redis cluster startup nodes (one is enough).
:code:`redis+cluster://localhost:7000` or :code:`redis+cluster://localhost:7000,localhost:7001`. If the cluster is password-protected the username and/or password can be provided in the url, for example :code:`redis+cluster://:sekret@localhost:7000,localhost:7001` Depends on: :pypi:`redis` MongoDB Storage --------------- Requires the location(s) of a mongodb installation using the uri schema described by the `Mongodb URI Specification `_. Examples: - Local instance: ``mongodb://localhost:27017/`` - Instance with SSL: ``mongodb://mymongo.com/?tls=true`` - Local instance with SSL & self-signed/invalid certificate: ``mongodb://localhost:27017/?tls=true&tlsAllowInvalidCertificates=true`` Depends on: :pypi:`pymongo` Etcd Storage ------------ Requires the location of an etcd node. Example: ``etcd://localhost:2379`` Depends on: :pypi:`etcd3` Async Storage ============= .. versionadded:: 2.1 When using limits in an async code base the same uri schema can be used to query for an async implementation of the storage by prefixing the scheme with ``async+``. For example: - ``async+redis://localhost:6379/0`` - ``async+rediss://localhost:6379/0`` - ``async+redis+cluster://localhost:7000,localhost:7001`` - ``async+redis+sentinel://:sekret@localhost:26379/my-redis-service`` - ``async+memcached://localhost:11211`` - ``async+etcd://localhost:2379`` - ``async+memory://`` For implementation details of currently supported async backends refer to :ref:`api:async storage` limits-3.12.0/doc/source/strategies.rst000066400000000000000000000034741462017272200200600ustar00rootroot00000000000000======================== Rate limiting strategies ======================== Fixed Window ============ This is the most memory-efficient strategy to use as it maintains one counter per resource and rate limit. It does, however, have its drawbacks as it allows bursts within each window - thus allowing an 'attacker' to bypass the limits. The effects of these bursts can be partially circumvented by enforcing multiple granularities of windows per resource. For example, if you specify a ``100/minute`` rate limit on a route, this strategy will allow 100 hits in the last second of one window and 100 more in the first second of the next window. To ensure that such bursts are managed, you could add a second rate limit of ``2/second`` on the same route. Fixed Window with Elastic Expiry ================================ This strategy works almost identically to the Fixed Window strategy with the exception that each hit results in the extension of the window. This strategy works well for creating large penalties for breaching a rate limit. For example, if you specify a ``100/minute`` rate limit on a route and it is being attacked at the rate of 5 hits per second for 2 minutes - the attacker will be locked out of the resource for an extra 60 seconds after the last hit. This strategy helps circumvent bursts. Moving Window ============= .. warning:: The moving window strategy is not implemented for the ``memcached`` storage backend. This strategy is the most effective for preventing bursts from bypassing the rate limit as the window for each limit is not fixed at the start and end of each time unit (i.e. N/second for a moving window means N in the last 1000 milliseconds). There is however a higher memory cost associated with this strategy as it requires ``N`` items to be maintained in memory per resource and rate limit.
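To see the trade-offs concretely, here is a minimal sketch using the in-memory backend and the strategy classes from ``limits.strategies`` (the namespace and identifier strings are arbitrary):

.. code::

    from limits import parse, storage, strategies

    backend = storage.MemoryStorage()
    fixed = strategies.FixedWindowRateLimiter(backend)
    moving = strategies.MovingWindowRateLimiter(backend)

    two_per_minute = parse("2/minute")

    # Both strategies allow the first two hits in a window ...
    for _ in range(2):
        assert fixed.hit(two_per_minute, "docs", "fixed")
        assert moving.hit(two_per_minute, "docs", "moving")

    # ... and reject the third until the window rolls over.
    assert not fixed.hit(two_per_minute, "docs", "fixed")
    assert not moving.hit(two_per_minute, "docs", "moving")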
limits-3.12.0/doc/source/theme_config.py000066400000000000000000000026601462017272200201530ustar00rootroot00000000000000colors = { "bg0": " #fbf1c7", "bg1": " #ebdbb2", "bg2": " #d5c4a1", "bg3": " #bdae93", "bg4": " #a89984", "gry": " #928374", "fg4": " #7c6f64", "fg3": " #665c54", "fg2": " #504945", "fg1": " #3c3836", "fg0": " #282828", "red": " #cc241d", "red2": " #9d0006", "orange": " #d65d0e", "orange2": " #af3a03", "yellow": " #d79921", "yellow2": " #b57614", "green": " #98971a", "green2": " #79740e", "aqua": " #689d6a", "aqua2": " #427b58", "blue": " #458588", "blue2": " #076678", "purple": " #b16286", "purple2": " #8f3f71", } html_theme = "furo" html_theme_options = { "light_css_variables": { "font-stack": "Fira Sans, sans-serif", "font-stack--monospace": "Fira Code, monospace", "color-brand-primary": colors["purple2"], "color-brand-content": colors["blue2"], }, "dark_css_variables": { "color-brand-primary": colors["purple"], "color-brand-content": colors["blue"], "color-background-primary": colors["fg1"], "color-background-secondary": colors["fg0"], "color-foreground-primary": colors["bg0"], "color-foreground-secondary": colors["bg1"], "color-highlighted-background": colors["yellow"], "color-highlight-on-target": colors["fg2"], }, } highlight_language = "python3" pygments_style = "gruvbox-light" pygments_dark_style = "gruvbox-dark" limits-3.12.0/docker-compose.yml000066400000000000000000000240241462017272200165400ustar00rootroot00000000000000version: '3.2' services: # etcd etcd: image: "bitnami/etcd:${LIMITS_ETCD_SERVER_VERSION:-latest}" environment: - ALLOW_NONE_AUTHENTICATION=yes - ETCD_ADVERTISE_CLIENT_URLS=http://etcd:2379 - ETCD_ELECTION_TIMEOUT=600 ports: - 2379:2379 - 2380:2380 # memcached memcached-1: image: "memcached:${LIMITS_MEMCACHED_SERVER_VERSION:-latest}" command: -p 22122 ports: - 22122:22122 memcached-2: image: "memcached:${LIMITS_MEMCACHED_SERVER_VERSION:-latest}" command: -p 22123 ports: - 22123:22123 memcached-uds: image: "memcached:${LIMITS_MEMCACHED_SERVER_VERSION:-latest}" command: sh -c "test ${HOST_OS} = \"Darwin\" && exit || memcached -s /sockets/limits.memcached.sock -a 777" volumes: - type: bind source: /tmp/ target: /sockets/ # redis sentinel redis-sentinel-master: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" command: redis-server --port 6380 ports: - '6380:6380' redis-sentinel-slave: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" depends_on: [redis-sentinel-master] command: redis-server --port 6381 --slaveof ${HOST_IP} 6380 --slave-announce-ip ${HOST_IP} ports: - '6381:6381' redis-sentinel-master-auth: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" command: redis-server --port 6382 --requirepass sekret ports: - '6382:6382' redis-sentinel-slave-auth: image: "redis:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}" depends_on: [redis-sentinel-master-auth] command: redis-server --port 6383 --slaveof ${HOST_IP} 6382 --slave-announce-ip ${HOST_IP} --requirepass sekret --masterauth sekret ports: - '6383:6383' redis-sentinel: image: 'bitnami/redis-sentinel:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}' depends_on: [redis-sentinel-slave] environment: - REDIS_MASTER_HOST=${HOST_IP} - REDIS_MASTER_PORT_NUMBER=6380 ports: - '26379:26379' redis-sentinel-auth: image: 'bitnami/redis-sentinel:${LIMITS_REDIS_SENTINEL_SERVER_VERSION:-latest}' depends_on: [redis-sentinel-slave-auth] environment: - REDIS_MASTER_HOST=${HOST_IP} - REDIS_MASTER_PASSWORD=sekret - REDIS_MASTER_PORT_NUMBER=6382 - REDIS_SENTINEL_PASSWORD=sekret 
ports: - '36379:26379' # cluster redis-cluster-1: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7001 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7001:7001' - '17001:17001' redis-cluster-2: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7002 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7002:7002' - '17002:17002' redis-cluster-3: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7003 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7003:7003' - '17003:17003' redis-cluster-4: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7004 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7004:7004' - '17004:17004' redis-cluster-5: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7005 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7005:7005' - '17005:17005' redis-cluster-6: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7006 --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} ports: - '7006:7006' - '17006:17006' redis-cluster-init: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: bash -c "echo yes | redis-cli --cluster create --cluster-replicas 1 ${HOST_IP}:7001 ${HOST_IP}:7002 ${HOST_IP}:7003 ${HOST_IP}:7004 ${HOST_IP}:7005 ${HOST_IP}:7006" depends_on: [redis-cluster-1, redis-cluster-2, redis-cluster-3, redis-cluster-4, redis-cluster-5, redis-cluster-6] redis-ssl-cluster-1: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8301 ${DEFAULT_ARGS---enable-debug-command yes} --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8301:8301' - '18301:18301' volumes: - ./tests/tls:/tls redis-ssl-cluster-2: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8302 ${DEFAULT_ARGS---enable-debug-command yes} --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8302:8302' - '18302:18302' volumes: - ./tests/tls:/tls redis-ssl-cluster-3: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8303 ${DEFAULT_ARGS---enable-debug-command yes} --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8303:8303' - '18303:18303' volumes: - ./tests/tls:/tls redis-ssl-cluster-4: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8304 ${DEFAULT_ARGS---enable-debug-command yes} --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key 
--tls-ca-cert-file /tls/ca.crt ports: - '8304:8304' - '18304:18304' volumes: - ./tests/tls:/tls redis-ssl-cluster-5: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8305 ${DEFAULT_ARGS---enable-debug-command yes} --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8305:8305' - '18305:18305' volumes: - ./tests/tls:/tls redis-ssl-cluster-6: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 0 --tls-port 8306 ${DEFAULT_ARGS---enable-debug-command yes} --tls-replication yes --tls-cluster yes --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8306:8306' - '18306:18306' volumes: - ./tests/tls:/tls redis-ssl-cluster-init: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: bash -c "echo yes | redis-cli --cluster create --cluster-replicas 1 ${HOST_IP}:8301 ${HOST_IP}:8302 ${HOST_IP}:8303 ${HOST_IP}:8304 ${HOST_IP}:8305 ${HOST_IP}:8306 --tls --cert /tls/redis.crt --key /tls/redis.key --cacert /tls/ca.crt " depends_on: [redis-ssl-cluster-1, redis-ssl-cluster-2, redis-ssl-cluster-3, redis-ssl-cluster-4, redis-ssl-cluster-5, redis-ssl-cluster-6] volumes: - ./tests/tls:/tls redis-cluster-auth-1: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 8400 ${DEFAULT_ARGS---enable-debug-command yes} --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --requirepass sekret ports: - '8400:8400' - '18400:18400' redis-cluster-auth-2: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 8401 ${DEFAULT_ARGS---enable-debug-command yes} --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --requirepass sekret ports: - '8401:8401' - '18401:18401' redis-cluster-auth-3: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 8402 ${DEFAULT_ARGS---enable-debug-command yes} --protected-mode no --cluster-enabled yes --loglevel verbose --cluster-announce-ip ${HOST_IP} --requirepass sekret ports: - '8402:8402' - '18402:18402' redis-cluster-auth-init: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: bash -c "echo yes | redis-cli --cluster create --cluster-replicas 0 ${HOST_IP}:8400 ${HOST_IP}:8401 ${HOST_IP}:8402 -a sekret" depends_on: [redis-cluster-auth-1, redis-cluster-auth-2, redis-cluster-auth-3] redis-basic: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7379 ports: - '7379:7379' redis-auth: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: redis-server --port 7389 --requirepass sekret ports: - '7389:7389' redis-ssl: image: "redis:${LIMITS_REDIS_SERVER_SSL_VERSION:-latest}" command: redis-server --port 0 --tls-port 8379 --tls-cert-file /tls/redis.crt --tls-key-file /tls/redis.key --tls-ca-cert-file /tls/ca.crt ports: - '8379:8379' volumes: - ./tests/tls:/tls redis-uds: image: "redis:${LIMITS_REDIS_SERVER_VERSION:-latest}" command: sh -c "test ${HOST_OS} = \"Darwin\" && exit || redis-server --port 0 --unixsocket /sockets/limits.redis.sock --unixsocketperm 777" volumes: - type: bind source: /tmp/ target: /sockets/ mongodb: image: "mongo:${LIMITS_MONGODB_SERVER_VERSION:-latest}" 
ports: - '37017:27017' dragonfly: image: "docker.dragonflydb.io/dragonflydb/dragonfly:latest" ports: - '21379:6379' ulimits: memlock: -1 limits-3.12.0/limits/000077500000000000000000000000001462017272200144025ustar00rootroot00000000000000limits-3.12.0/limits/__init__.py000066400000000000000000000013301462017272200165100ustar00rootroot00000000000000""" Rate limiting with commonly used storage backends """ from . import _version, aio, storage, strategies from .limits import ( RateLimitItem, RateLimitItemPerDay, RateLimitItemPerHour, RateLimitItemPerMinute, RateLimitItemPerMonth, RateLimitItemPerSecond, RateLimitItemPerYear, ) from .util import WindowStats, parse, parse_many __all__ = [ "RateLimitItem", "RateLimitItemPerYear", "RateLimitItemPerMonth", "RateLimitItemPerDay", "RateLimitItemPerHour", "RateLimitItemPerMinute", "RateLimitItemPerSecond", "aio", "storage", "strategies", "parse", "parse_many", "WindowStats", ] __version__ = _version.get_versions()["version"] # type: ignore limits-3.12.0/limits/_version.py000066400000000000000000000562251462017272200166120ustar00rootroot00000000000000# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.22 (https://github.com/python-versioneer/python-versioneer) """Git implementation of _version.py.""" import errno import functools import os import re import subprocess import sys from typing import Callable, Dict def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = "3.12.0" git_full = "ff28751a2326de0ad6a978e316397534acf29b81" git_date = "2024-05-12 10:01:06 -0700" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440-pre" cfg.tag_prefix = "" cfg.parentdir_prefix = "limits-" cfg.versionfile_source = "limits/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen( [command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs, ) break except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r"\d", r): continue if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else [] # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner( GITS, ["describe", "--tags", "--dirty", "--always", "--long", *MATCH_ARGS], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver): """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). """ vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces): """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%d" % (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), } def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None, } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None, } limits-3.12.0/limits/aio/000077500000000000000000000000001462017272200151525ustar00rootroot00000000000000limits-3.12.0/limits/aio/__init__.py000066400000000000000000000001221462017272200172560ustar00rootroot00000000000000from . 
import storage, strategies __all__ = [ "storage", "strategies", ] limits-3.12.0/limits/aio/storage/000077500000000000000000000000001462017272200166165ustar00rootroot00000000000000limits-3.12.0/limits/aio/storage/__init__.py000066400000000000000000000011231462017272200207240ustar00rootroot00000000000000""" Implementations of storage backends to be used with :class:`limits.aio.strategies.RateLimiter` strategies """ from .base import MovingWindowSupport, Storage from .etcd import EtcdStorage from .memcached import MemcachedStorage from .memory import MemoryStorage from .mongodb import MongoDBStorage from .redis import RedisClusterStorage, RedisSentinelStorage, RedisStorage __all__ = [ "Storage", "MovingWindowSupport", "EtcdStorage", "MemcachedStorage", "MemoryStorage", "MongoDBStorage", "RedisStorage", "RedisClusterStorage", "RedisSentinelStorage", ] limits-3.12.0/limits/aio/storage/base.py000066400000000000000000000113211462017272200201000ustar00rootroot00000000000000from __future__ import annotations import functools from abc import ABC, abstractmethod from typing import Any, cast from deprecated.sphinx import versionadded from limits import errors from limits.storage.registry import StorageRegistry from limits.typing import ( Awaitable, Callable, List, Optional, P, R, Tuple, Type, Union, ) from limits.util import LazyDependency def _wrap_errors( storage: Storage, fn: Callable[P, Awaitable[R]], ) -> Callable[P, Awaitable[R]]: @functools.wraps(fn) async def inner(*args: P.args, **kwargs: P.kwargs) -> R: # type: ignore[misc] try: return await fn(*args, **kwargs) except storage.base_exceptions as exc: if storage.wrap_exceptions: raise errors.StorageError(exc) from exc raise return inner @versionadded(version="2.1") class Storage(LazyDependency, metaclass=StorageRegistry): """ Base class to extend when implementing an async storage backend. """ STORAGE_SCHEME: Optional[List[str]] """The storage schemes to register against this implementation""" def __new__(cls, *args: Any, **kwargs: Any) -> Storage: # type: ignore[misc] inst = super().__new__(cls) for method in { "incr", "get", "get_expiry", "check", "reset", "clear", }: setattr(inst, method, _wrap_errors(inst, getattr(inst, method))) return inst def __init__( self, uri: Optional[str] = None, wrap_exceptions: bool = False, **options: Union[float, str, bool], ) -> None: """ :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. """ super().__init__() self.wrap_exceptions = wrap_exceptions @property @abstractmethod def base_exceptions(self) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: raise NotImplementedError @abstractmethod async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. 
:param amount: the number to increment by """ raise NotImplementedError @abstractmethod async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ raise NotImplementedError @abstractmethod async def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ raise NotImplementedError @abstractmethod async def check(self) -> bool: """ check if storage is healthy """ raise NotImplementedError @abstractmethod async def reset(self) -> Optional[int]: """ reset storage to clear limits """ raise NotImplementedError @abstractmethod async def clear(self, key: str) -> None: """ resets the rate limit key :param key: the key to clear rate limits for """ raise NotImplementedError class MovingWindowSupport(ABC): """ Abstract base for storages that intend to support the moving window strategy """ def __new__(cls, *args: Any, **kwargs: Any) -> MovingWindowSupport: # type: ignore[misc] inst = super().__new__(cls) for method in { "acquire_entry", "get_moving_window", }: setattr( inst, method, _wrap_errors(cast(Storage, inst), getattr(inst, method)), ) return inst @abstractmethod async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ raise NotImplementedError @abstractmethod async def get_moving_window( self, key: str, limit: int, expiry: int ) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ raise NotImplementedError limits-3.12.0/limits/aio/storage/etcd.py000066400000000000000000000112671462017272200201160ustar00rootroot00000000000000import asyncio import time import urllib.parse from typing import TYPE_CHECKING, Optional, Tuple, Type, Union from limits.aio.storage.base import Storage from limits.errors import ConcurrentUpdateError if TYPE_CHECKING: import aetcd class EtcdStorage(Storage): """ Rate limit storage with etcd as backend. Depends on :pypi:`aetcd`. 
""" STORAGE_SCHEME = ["async+etcd"] """The async storage scheme for etcd""" DEPENDENCIES = ["aetcd"] PREFIX = "limits" MAX_RETRIES = 5 def __init__( self, uri: str, max_retries: int = MAX_RETRIES, **options: str, ) -> None: """ :param uri: etcd location of the form ``async+etcd://host:port``, :param max_retries: Maximum number of attempts to retry in the case of concurrent updates to a rate limit key :param options: all remaining keyword arguments are passed directly to the constructor of :class:`aetcd.client.Client` :raise ConfigurationError: when :pypi:`aetcd` is not available """ parsed = urllib.parse.urlparse(uri) self.lib = self.dependencies["aetcd"].module self.storage: "aetcd.Client" = self.lib.Client( host=parsed.hostname, port=parsed.port, **options ) self.max_retries = max_retries @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return self.lib.ClientError # type: ignore[no-any-return] def prefixed_key(self, key: str) -> bytes: return f"{self.PREFIX}/{key}".encode() async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: retries = 0 etcd_key = self.prefixed_key(key) while retries < self.max_retries: now = time.time() lease = await self.storage.lease(expiry) window_end = now + expiry create_attempt = await self.storage.transaction( compare=[self.storage.transactions.create(etcd_key) == b"0"], success=[ self.storage.transactions.put( etcd_key, f"{amount}:{window_end}".encode(), lease=lease.id ) ], failure=[self.storage.transactions.get(etcd_key)], ) if create_attempt[0]: return amount else: cur = create_attempt[1][0][0][1] cur_value, window_end = cur.value.split(b":") window_end = float(window_end) if window_end <= now: await asyncio.gather( self.storage.revoke_lease(cur.lease), self.storage.delete(etcd_key), ) else: if elastic_expiry: await self.storage.refresh_lease(cur.lease) window_end = now + expiry new = int(cur_value) + amount if ( await self.storage.transaction( compare=[ self.storage.transactions.value(etcd_key) == cur.value ], success=[ self.storage.transactions.put( etcd_key, f"{new}:{window_end}".encode(), lease=cur.lease, ) ], failure=[], ) )[0]: return new retries += 1 raise ConcurrentUpdateError(key, retries) async def get(self, key: str) -> int: cur = await self.storage.get(self.prefixed_key(key)) if cur: amount, expiry = cur.value.split(b":") if float(expiry) > time.time(): return int(amount) return 0 async def get_expiry(self, key: str) -> int: cur = await self.storage.get(self.prefixed_key(key)) if cur: window_end = float(cur.value.split(b":")[1]) return int(window_end) return int(time.time()) async def check(self) -> bool: try: await self.storage.status() return True except: # noqa return False async def reset(self) -> Optional[int]: return (await self.storage.delete_prefix(f"{self.PREFIX}/".encode())).deleted async def clear(self, key: str) -> None: await self.storage.delete(self.prefixed_key(key)) limits-3.12.0/limits/aio/storage/memcached.py000066400000000000000000000113021462017272200210730ustar00rootroot00000000000000import time import urllib.parse from deprecated.sphinx import versionadded from limits.aio.storage.base import Storage from limits.typing import EmcacheClientP, Optional, Tuple, Type, Union @versionadded(version="2.1") class MemcachedStorage(Storage): """ Rate limit storage with memcached as backend. 
Depends on :pypi:`emcache` """ STORAGE_SCHEME = ["async+memcached"] """The storage scheme for memcached to be used in an async context""" DEPENDENCIES = ["emcache"] def __init__( self, uri: str, wrap_exceptions: bool = False, **options: Union[float, str, bool], ) -> None: """ :param uri: memcached location of the form ``async+memcached://host:port,host:port`` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`emcache.Client` :raise ConfigurationError: when :pypi:`emcache` is not available """ parsed = urllib.parse.urlparse(uri) self.hosts = [] for host, port in ( loc.split(":") for loc in parsed.netloc.strip().split(",") if loc.strip() ): self.hosts.append((host, int(port))) self._options = options self._storage = None super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self.dependency = self.dependencies["emcache"].module @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return ( self.dependency.ClusterNoAvailableNodes, self.dependency.CommandError, ) async def get_storage(self) -> EmcacheClientP: if not self._storage: self._storage = await self.dependency.create_client( [self.dependency.MemcachedHostAddress(h, p) for h, p in self.hosts], **self._options, ) assert self._storage return self._storage async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ item = await (await self.get_storage()).get(key.encode("utf-8")) return item and int(item.value) or 0 async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ await (await self.get_storage()).delete(key.encode("utf-8")) async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. 
:param amount: the number to increment by """ storage = await self.get_storage() limit_key = key.encode("utf-8") expire_key = f"{key}/expires".encode() added = True try: await storage.add(limit_key, f"{amount}".encode(), exptime=expiry) except self.dependency.NotStoredStorageCommandError: added = False storage = await self.get_storage() if not added: value = await storage.increment(limit_key, amount) or amount if elastic_expiry: await storage.touch(limit_key, exptime=expiry) await storage.set( expire_key, str(expiry + time.time()).encode("utf-8"), exptime=expiry, noreply=False, ) return value else: await storage.set( expire_key, str(expiry + time.time()).encode("utf-8"), exptime=expiry, noreply=False, ) return amount async def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ storage = await self.get_storage() item = await storage.get(f"{key}/expires".encode()) return int(item and float(item.value) or time.time()) async def check(self) -> bool: """ Check if storage is healthy by calling the ``get`` command on the key ``limiter-check`` """ try: storage = await self.get_storage() await storage.get(b"limiter-check") return True except: # noqa return False async def reset(self) -> Optional[int]: raise NotImplementedError limits-3.12.0/limits/aio/storage/memory.py000066400000000000000000000133401462017272200205010ustar00rootroot00000000000000import asyncio import time from collections import Counter from deprecated.sphinx import versionadded import limits.typing from limits.aio.storage.base import MovingWindowSupport, Storage from limits.typing import Dict, List, Optional, Tuple, Type, Union class LockableEntry(asyncio.Lock): def __init__(self, expiry: int) -> None: self.atime = time.time() self.expiry = self.atime + expiry super().__init__() @versionadded(version="2.1") class MemoryStorage(Storage, MovingWindowSupport): """ rate limit storage using :class:`collections.Counter` as an in memory storage for fixed and elastic window strategies, and a simple list to implement moving window strategy. """ STORAGE_SCHEME = ["async+memory"] """ The storage scheme for in process memory storage for use in an async context """ def __init__( self, uri: Optional[str] = None, wrap_exceptions: bool = False, **_: str ) -> None: self.storage: limits.typing.Counter[str] = Counter() self.expirations: Dict[str, float] = {} self.events: Dict[str, List[LockableEntry]] = {} self.timer: Optional[asyncio.Task[None]] = None super().__init__(uri, wrap_exceptions=wrap_exceptions, **_) @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return ValueError async def __expire_events(self) -> None: for key in self.events.keys(): for event in list(self.events[key]): async with event: if event.expiry <= time.time() and event in self.events[key]: self.events[key].remove(event) for key in list(self.expirations.keys()): if self.expirations[key] <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) async def __schedule_expiry(self) -> None: if not self.timer or self.timer.done(): self.timer = asyncio.create_task(self.__expire_events()) async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. 
:param amount: the number to increment by """ await self.get(key) await self.__schedule_expiry() self.storage[key] += amount if elastic_expiry or self.storage[key] == amount: self.expirations[key] = time.time() + expiry return self.storage.get(key, amount) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ if self.expirations.get(key, 0) <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) return self.storage.get(key, 0) async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.storage.pop(key, None) self.expirations.pop(key, None) self.events.pop(key, None) async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ if amount > limit: return False self.events.setdefault(key, []) await self.__schedule_expiry() timestamp = time.time() try: entry: Optional[LockableEntry] = self.events[key][limit - amount] except IndexError: entry = None if entry and entry.atime >= timestamp - expiry: return False else: self.events[key][:0] = [LockableEntry(expiry) for _ in range(amount)] return True async def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ return int(self.expirations.get(key, time.time())) async def get_num_acquired(self, key: str, expiry: int) -> int: """ returns the number of entries already acquired :param key: rate limit key to acquire an entry in :param expiry: expiry of the entry """ timestamp = time.time() return ( len([k for k in self.events[key] if k.atime >= timestamp - expiry]) if self.events.get(key) else 0 ) # FIXME: arg limit is not used async def get_moving_window( self, key: str, limit: int, expiry: int ) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() acquired = await self.get_num_acquired(key, expiry) for item in self.events.get(key, [])[::-1]: if item.atime >= timestamp - expiry: return int(item.atime), acquired return int(timestamp), acquired async def check(self) -> bool: """ check if storage is healthy """ return True async def reset(self) -> Optional[int]: num_items = max(len(self.storage), len(self.events)) self.storage.clear() self.expirations.clear() self.events.clear() return num_items limits-3.12.0/limits/aio/storage/mongodb.py000066400000000000000000000222601462017272200206170ustar00rootroot00000000000000from __future__ import annotations import asyncio import calendar import datetime import time from typing import Any, cast from deprecated.sphinx import versionadded from limits.aio.storage.base import MovingWindowSupport, Storage from limits.typing import Dict, Optional, ParamSpec, Tuple, Type, TypeVar, Union from limits.util import get_dependency P = ParamSpec("P") R = TypeVar("R") @versionadded(version="2.1") class MongoDBStorage(Storage, MovingWindowSupport): """ Rate limit storage with MongoDB as backend. 
Depends on :pypi:`motor` """ STORAGE_SCHEME = ["async+mongodb", "async+mongodb+srv"] """ The storage scheme for MongoDB for use in an async context """ DEPENDENCIES = ["motor.motor_asyncio", "pymongo"] def __init__( self, uri: str, database_name: str = "limits", wrap_exceptions: bool = False, **options: Union[float, str, bool], ) -> None: """ :param uri: uri of the form ``async+mongodb://[user:password]@host:port?...``, This uri is passed directly to :class:`~motor.motor_asyncio.AsyncIOMotorClient` :param database_name: The database to use for storing the rate limit collections. :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed to the constructor of :class:`~motor.motor_asyncio.AsyncIOMotorClient` :raise ConfigurationError: when the :pypi:`motor` or :pypi:`pymongo` are not available """ uri = uri.replace("async+mongodb", "mongodb", 1) super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self.dependency = self.dependencies["motor.motor_asyncio"] self.proxy_dependency = self.dependencies["pymongo"] self.lib_errors, _ = get_dependency("pymongo.errors") self.storage = self.dependency.module.AsyncIOMotorClient(uri, **options) # TODO: Fix this hack. It was noticed when running a benchmark # with FastAPI - however - doesn't appear in unit tests or in an isolated # use. Reference: https://jira.mongodb.org/browse/MOTOR-822 self.storage.get_io_loop = asyncio.get_running_loop self.__database_name = database_name self.__indices_created = False @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return self.lib_errors.PyMongoError # type: ignore @property def database(self): # type: ignore return self.storage.get_database(self.__database_name) async def create_indices(self) -> None: if not self.__indices_created: await asyncio.gather( self.database.counters.create_index("expireAt", expireAfterSeconds=0), self.database.windows.create_index("expireAt", expireAfterSeconds=0), ) self.__indices_created = True async def reset(self) -> Optional[int]: """ Delete all rate limit keys in the rate limit collections (counters, windows) """ num_keys = sum( await asyncio.gather( self.database.counters.count_documents({}), self.database.windows.count_documents({}), ) ) await asyncio.gather( self.database.counters.drop(), self.database.windows.drop() ) return cast(int, num_keys) async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ await asyncio.gather( self.database.counters.find_one_and_delete({"_id": key}), self.database.windows.find_one_and_delete({"_id": key}), ) async def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ counter = await self.database.counters.find_one({"_id": key}) expiry = ( counter["expireAt"] if counter else datetime.datetime.now(datetime.timezone.utc) ) return calendar.timegm(expiry.timetuple()) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ counter = await self.database.counters.find_one( { "_id": key, "expireAt": {"$gte": datetime.datetime.now(datetime.timezone.utc)}, }, projection=["count"], ) return counter and counter["count"] or 0 async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire 
in :param elastic_expiry: whether to keep extending the rate limit window every hit. :param amount: the number to increment by """ await self.create_indices() expiration = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( seconds=expiry ) response = await self.database.counters.find_one_and_update( {"_id": key}, [ { "$set": { "count": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": amount, "else": {"$add": ["$count", amount]}, } }, "expireAt": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": expiration, "else": (expiration if elastic_expiry else "$expireAt"), } }, } }, ], upsert=True, projection=["count"], return_document=self.proxy_dependency.module.ReturnDocument.AFTER, ) return int(response["count"]) async def check(self) -> bool: """ Check if storage is healthy by calling :meth:`motor.motor_asyncio.AsyncIOMotorClient.server_info` """ try: await self.storage.server_info() return True except: # noqa: E722 return False async def get_moving_window( self, key: str, limit: int, expiry: int ) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param str key: rate limit key :param int expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() result = await self.database.windows.aggregate( [ {"$match": {"_id": key}}, { "$project": { "entries": { "$filter": { "input": "$entries", "as": "entry", "cond": {"$gte": ["$$entry", timestamp - expiry]}, } } } }, {"$unwind": "$entries"}, { "$group": { "_id": "$_id", "min": {"$min": "$entries"}, "count": {"$sum": 1}, } }, ] ).to_list(length=1) if result: return (int(result[0]["min"]), result[0]["count"]) return (int(timestamp), 0) async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ await self.create_indices() if amount > limit: return False timestamp = time.time() try: updates: Dict[str, Any] = { # type: ignore "$push": {"entries": {"$each": [], "$position": 0, "$slice": limit}} } updates["$set"] = { "expireAt": ( datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=expiry) ) } updates["$push"]["entries"]["$each"] = [timestamp] * amount await self.database.windows.update_one( { "_id": key, "entries.%d" % (limit - amount): { "$not": {"$gte": timestamp - expiry} }, }, updates, upsert=True, ) return True except self.proxy_dependency.module.errors.DuplicateKeyError: return False limits-3.12.0/limits/aio/storage/redis.py000066400000000000000000000364431462017272200203100ustar00rootroot00000000000000import time import urllib from typing import TYPE_CHECKING, cast from deprecated.sphinx import versionadded from packaging.version import Version from limits.aio.storage.base import MovingWindowSupport, Storage from limits.errors import ConfigurationError from limits.typing import AsyncRedisClient, Dict, Optional, Tuple, Type, Union from limits.util import get_package_data if TYPE_CHECKING: import coredis import coredis.commands class RedisInteractor: RES_DIR = "resources/redis/lua_scripts" SCRIPT_MOVING_WINDOW = get_package_data(f"{RES_DIR}/moving_window.lua") SCRIPT_ACQUIRE_MOVING_WINDOW = get_package_data( f"{RES_DIR}/acquire_moving_window.lua" ) SCRIPT_CLEAR_KEYS = get_package_data(f"{RES_DIR}/clear_keys.lua") SCRIPT_INCR_EXPIRE = get_package_data(f"{RES_DIR}/incr_expire.lua") 
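# For reference: ``incr_expire.lua`` (bundled under ``resources/redis/lua_scripts``
# and bound to :attr:`lua_incr_expire` below) is roughly equivalent to this
# non-atomic client-side sketch; the script exists so that the increment and
# the conditional expiry run as a single atomic server-side operation:
#
#   value = await connection.incrby(key, amount)
#   if value == amount:  # first hit in the current window
#       await connection.expire(key, expiry)
#   return value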
lua_moving_window: "coredis.commands.Script[bytes]" lua_acquire_window: "coredis.commands.Script[bytes]" lua_clear_keys: "coredis.commands.Script[bytes]" lua_incr_expire: "coredis.commands.Script[bytes]" PREFIX = "LIMITS" def prefixed_key(self, key: str) -> str: return f"{self.PREFIX}:{key}" async def _incr( self, key: str, expiry: int, connection: AsyncRedisClient, elastic_expiry: bool = False, amount: int = 1, ) -> int: """ increments the counter for a given rate limit key :param connection: Redis connection :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ key = self.prefixed_key(key) value = await connection.incrby(key, amount) if elastic_expiry or value == amount: await connection.expire(key, expiry) return value async def _get(self, key: str, connection: AsyncRedisClient) -> int: """ :param connection: Redis connection :param key: the key to get the counter value for """ key = self.prefixed_key(key) return int(await connection.get(key) or 0) async def _clear(self, key: str, connection: AsyncRedisClient) -> None: """ :param key: the key to clear rate limits for :param connection: Redis connection """ key = self.prefixed_key(key) await connection.delete([key]) async def get_moving_window( self, key: str, limit: int, expiry: int ) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ key = self.prefixed_key(key) timestamp = int(time.time()) window = await self.lua_moving_window.execute( [key], [int(timestamp - expiry), limit] ) if window: return tuple(window) # type: ignore return timestamp, 0 async def _acquire_entry( self, key: str, limit: int, expiry: int, connection: AsyncRedisClient, amount: int = 1, ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param connection: Redis connection """ key = self.prefixed_key(key) timestamp = time.time() acquired = await self.lua_acquire_window.execute( [key], [timestamp, limit, expiry, amount] ) return bool(acquired) async def _get_expiry(self, key: str, connection: AsyncRedisClient) -> int: """ :param key: the key to get the expiry for :param connection: Redis connection """ key = self.prefixed_key(key) return int(max(await connection.ttl(key), 0) + time.time()) async def _check(self, connection: AsyncRedisClient) -> bool: """ check if storage is healthy :param connection: Redis connection """ try: await connection.ping() return True except: # noqa return False @versionadded(version="2.1") class RedisStorage(RedisInteractor, Storage, MovingWindowSupport): """ Rate limit storage with redis as backend. Depends on :pypi:`coredis` """ STORAGE_SCHEME = ["async+redis", "async+rediss", "async+redis+unix"] """ The storage schemes for redis to be used in an async context """ DEPENDENCIES = {"coredis": Version("3.4.0")} def __init__( self, uri: str, connection_pool: Optional["coredis.ConnectionPool"] = None, wrap_exceptions: bool = False, **options: Union[float, str, bool], ) -> None: """ :param uri: uri of the form: - ``async+redis://[:password]@host:port`` - ``async+redis://[:password]@host:port/db`` - ``async+rediss://[:password]@host:port`` - ``async+redis+unix:///path/to/sock?db=0`` etc... 
This uri is passed directly to :meth:`coredis.Redis.from_url` with the initial ``async`` removed, except for the case of ``async+redis+unix`` where it is replaced with ``unix``. :param connection_pool: if provided, the redis client is initialized with the connection pool and any other params passed as :paramref:`options` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`coredis.Redis` :raise ConfigurationError: when the redis library is not available """ uri = uri.replace("async+redis", "redis", 1) uri = uri.replace("redis+unix", "unix") super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self.dependency = self.dependencies["coredis"].module if connection_pool: self.storage = self.dependency.Redis( connection_pool=connection_pool, **options ) else: self.storage = self.dependency.Redis.from_url(uri, **options) self.initialize_storage(uri) @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return self.dependency.exceptions.RedisError # type: ignore[no-any-return] def initialize_storage(self, _uri: str) -> None: # all these methods are coroutines, so must be called with await self.lua_moving_window = self.storage.register_script(self.SCRIPT_MOVING_WINDOW) self.lua_acquire_window = self.storage.register_script( self.SCRIPT_ACQUIRE_MOVING_WINDOW ) self.lua_clear_keys = self.storage.register_script(self.SCRIPT_CLEAR_KEYS) self.lua_incr_expire = self.storage.register_script( RedisStorage.SCRIPT_INCR_EXPIRE ) async def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ if elastic_expiry: return await super()._incr( key, expiry, self.storage, elastic_expiry, amount ) else: key = self.prefixed_key(key) return cast( int, await self.lua_incr_expire.execute([key], [expiry, amount]) ) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ return await super()._get(key, self.storage) async def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ return await super()._clear(key, self.storage) async def acquire_entry( self, key: str, limit: int, expiry: int, amount: int = 1 ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ return await super()._acquire_entry(key, limit, expiry, self.storage, amount) async def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ return await super()._get_expiry(key, self.storage) async def check(self) -> bool: """ Check if storage is healthy by calling :meth:`coredis.Redis.ping` """ return await super()._check(self.storage) async def reset(self) -> Optional[int]: """ This function calls a Lua Script to delete keys prefixed with ``self.PREFIX`` in blocks of 5000. .. warning:: This operation was designed to be fast, but was not tested on a large production based system. Be careful with its usage as it could be slow on very large data sets. 
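A minimal usage sketch (the localhost uri is a placeholder)::

    from limits.aio.storage import RedisStorage

    storage = RedisStorage("async+redis://localhost:6379")
    cleared = await storage.reset()  # number of keys deleted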
""" prefix = self.prefixed_key("*") return cast(int, await self.lua_clear_keys.execute([prefix])) @versionadded(version="2.1") class RedisClusterStorage(RedisStorage): """ Rate limit storage with redis cluster as backend Depends on :pypi:`coredis` """ STORAGE_SCHEME = ["async+redis+cluster"] """ The storage schemes for redis cluster to be used in an async context """ DEFAULT_OPTIONS: Dict[str, Union[float, str, bool]] = { "max_connections": 1000, } "Default options passed to :class:`coredis.RedisCluster`" def __init__( self, uri: str, wrap_exceptions: bool = False, **options: Union[float, str, bool], ) -> None: """ :param uri: url of the form ``async+redis+cluster://[:password]@host:port,host:port`` :param options: all remaining keyword arguments are passed directly to the constructor of :class:`coredis.RedisCluster` :raise ConfigurationError: when the coredis library is not available or if the redis host cannot be pinged. """ parsed = urllib.parse.urlparse(uri) parsed_auth: Dict[str, Union[float, str, bool]] = {} if parsed.username: parsed_auth["username"] = parsed.username if parsed.password: parsed_auth["password"] = parsed.password sep = parsed.netloc.find("@") + 1 cluster_hosts = [] for loc in parsed.netloc[sep:].split(","): host, port = loc.split(":") cluster_hosts.append({"host": host, "port": int(port)}) super(RedisStorage, self).__init__( uri, wrap_exceptions=wrap_exceptions, **options ) self.dependency = self.dependencies["coredis"].module self.storage: "coredis.RedisCluster[str]" = self.dependency.RedisCluster( startup_nodes=cluster_hosts, **{**self.DEFAULT_OPTIONS, **parsed_auth, **options}, ) self.initialize_storage(uri) async def reset(self) -> Optional[int]: """ Redis Clusters are sharded and deleting across shards can't be done atomically. Because of this, this reset loops over all keys that are prefixed with ``self.PREFIX`` and calls delete on them, one at a time. .. warning:: This operation was not tested with extremely large data sets. On a large production based system, care should be taken with its usage as it could be slow on very large data sets """ prefix = self.prefixed_key("*") keys = await self.storage.keys(prefix) count = 0 for key in keys: count += await self.storage.delete([key]) return count @versionadded(version="2.1") class RedisSentinelStorage(RedisStorage): """ Rate limit storage with redis sentinel as backend Depends on :pypi:`coredis` """ STORAGE_SCHEME = ["async+redis+sentinel"] """The storage scheme for redis accessed via a redis sentinel installation""" DEPENDENCIES = {"coredis.sentinel": Version("3.4.0")} def __init__( self, uri: str, service_name: Optional[str] = None, use_replicas: bool = True, sentinel_kwargs: Optional[Dict[str, Union[float, str, bool]]] = None, **options: Union[float, str, bool], ): """ :param uri: url of the form ``async+redis+sentinel://host:port,host:port/service_name`` :param service_name, optional: sentinel service name (if not provided in `uri`) :param use_replicas: Whether to use replicas for read only operations :param sentinel_kwargs, optional: kwargs to pass as ``sentinel_kwargs`` to :class:`coredis.sentinel.Sentinel` :param options: all remaining keyword arguments are passed directly to the constructor of :class:`coredis.sentinel.Sentinel` :raise ConfigurationError: when the coredis library is not available or if the redis primary host cannot be pinged. 
""" parsed = urllib.parse.urlparse(uri) sentinel_configuration = [] connection_options = options.copy() sentinel_options = sentinel_kwargs.copy() if sentinel_kwargs else {} parsed_auth: Dict[str, Union[float, str, bool]] = {} if parsed.username: parsed_auth["username"] = parsed.username if parsed.password: parsed_auth["password"] = parsed.password sep = parsed.netloc.find("@") + 1 for loc in parsed.netloc[sep:].split(","): host, port = loc.split(":") sentinel_configuration.append((host, int(port))) self.service_name = ( parsed.path.replace("/", "") if parsed.path else service_name ) if self.service_name is None: raise ConfigurationError("'service_name' not provided") super(RedisStorage, self).__init__() self.dependency = self.dependencies["coredis.sentinel"].module self.sentinel = self.dependency.Sentinel( sentinel_configuration, sentinel_kwargs={**parsed_auth, **sentinel_options}, **{**parsed_auth, **connection_options}, ) self.storage = self.sentinel.primary_for(self.service_name) self.storage_replica = self.sentinel.replica_for(self.service_name) self.use_replicas = use_replicas self.initialize_storage(uri) async def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ return await super()._get( key, self.storage_replica if self.use_replicas else self.storage ) async def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ return await super()._get_expiry( key, self.storage_replica if self.use_replicas else self.storage ) async def check(self) -> bool: """ Check if storage is healthy by calling :meth:`coredis.Redis.ping` on the replica. """ return await super()._check( self.storage_replica if self.use_replicas else self.storage ) limits-3.12.0/limits/aio/strategies.py000066400000000000000000000145251462017272200177050ustar00rootroot00000000000000""" Asynchronous rate limiting strategies """ from abc import ABC, abstractmethod from typing import cast from ..limits import RateLimitItem from ..storage import StorageTypes from ..util import WindowStats from .storage import MovingWindowSupport, Storage class RateLimiter(ABC): def __init__(self, storage: StorageTypes): assert isinstance(storage, Storage) self.storage: Storage = storage @abstractmethod async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ raise NotImplementedError @abstractmethod async def test(self, item: RateLimitItem, *identifiers: str) -> bool: """ Check if the rate limit can be consumed :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit """ raise NotImplementedError @abstractmethod async def get_window_stats( self, item: RateLimitItem, *identifiers: str ) -> WindowStats: """ Query the reset time and remaining amount for the limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :return: (reset time, remaining)) """ raise NotImplementedError async def clear(self, item: RateLimitItem, *identifiers: str) -> None: return await self.storage.clear(item.key_for(*identifiers)) class MovingWindowRateLimiter(RateLimiter): """ Reference: :ref:`strategies:moving window` """ def __init__(self, storage: StorageTypes) -> None: if not ( hasattr(storage, "acquire_entry") or hasattr(storage, "get_moving_window") ): raise NotImplementedError( 
"MovingWindowRateLimiting is not implemented for storage " "of type %s" % storage.__class__ ) super().__init__(storage) async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ return await cast(MovingWindowSupport, self.storage).acquire_entry( item.key_for(*identifiers), item.amount, item.get_expiry(), amount=cost ) async def test(self, item: RateLimitItem, *identifiers: str) -> bool: """ Check if the rate limit can be consumed :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit """ res = await cast(MovingWindowSupport, self.storage).get_moving_window( item.key_for(*identifiers), item.amount, item.get_expiry(), ) amount = res[1] return amount < item.amount async def get_window_stats( self, item: RateLimitItem, *identifiers: str ) -> WindowStats: """ returns the number of requests remaining within this limit. :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :return: (reset time, remaining) """ window_start, window_items = await cast( MovingWindowSupport, self.storage ).get_moving_window(item.key_for(*identifiers), item.amount, item.get_expiry()) reset = window_start + item.get_expiry() return WindowStats(reset, item.amount - window_items) class FixedWindowRateLimiter(RateLimiter): """ Reference: :ref:`strategies:fixed window` """ async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ return ( await self.storage.incr( item.key_for(*identifiers), item.get_expiry(), elastic_expiry=False, amount=cost, ) <= item.amount ) async def test(self, item: RateLimitItem, *identifiers: str) -> bool: """ Check if the rate limit can be consumed :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit """ return await self.storage.get(item.key_for(*identifiers)) < item.amount async def get_window_stats( self, item: RateLimitItem, *identifiers: str ) -> WindowStats: """ Query the reset time and remaining amount for the limit :param item: the rate limit item :param identifiers: variable list of strings to uniquely identify the limit :return: reset time, remaining """ remaining = max( 0, item.amount - await self.storage.get(item.key_for(*identifiers)), ) reset = await self.storage.get_expiry(item.key_for(*identifiers)) return WindowStats(reset, remaining) class FixedWindowElasticExpiryRateLimiter(FixedWindowRateLimiter): """ Reference: :ref:`strategies:fixed window with elastic expiry` """ async def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: a :class:`limits.limits.RateLimitItem` instance :param identifiers: variable list of strings to uniquely identify the limit :param cost: The cost of this hit, default 1 """ amount = await self.storage.incr( item.key_for(*identifiers), item.get_expiry(), elastic_expiry=True, amount=cost, ) return amount <= item.amount STRATEGIES = { "fixed-window": FixedWindowRateLimiter, "fixed-window-elastic-expiry": FixedWindowElasticExpiryRateLimiter, "moving-window": MovingWindowRateLimiter, } 
limits-3.12.0/limits/errors.py000066400000000000000000000011621462017272200162700ustar00rootroot00000000000000""" errors and exceptions """ class ConfigurationError(Exception): """ Error raised when a configuration problem is encountered """ class ConcurrentUpdateError(Exception): """ Error raised when an update to limit fails due to concurrent updates """ def __init__(self, key: str, attempts: int) -> None: super().__init__(f"Unable to update {key} after {attempts} retries") class StorageError(Exception): """ Error raised when an error is encountered in a storage """ def __init__(self, storage_error: Exception) -> None: self.storage_error = storage_error limits-3.12.0/limits/limits.py000066400000000000000000000115171462017272200162620ustar00rootroot00000000000000""" """ from __future__ import annotations from functools import total_ordering from typing import Dict, NamedTuple, Optional, Tuple, Type, Union, cast from limits.typing import ClassVar, List def safe_string(value: Union[bytes, str, int]) -> str: """ converts a byte/str or int to a str """ if isinstance(value, bytes): return value.decode() return str(value) class Granularity(NamedTuple): seconds: int name: str TIME_TYPES = dict( day=Granularity(60 * 60 * 24, "day"), month=Granularity(60 * 60 * 24 * 30, "month"), year=Granularity(60 * 60 * 24 * 30 * 12, "year"), hour=Granularity(60 * 60, "hour"), minute=Granularity(60, "minute"), second=Granularity(1, "second"), ) GRANULARITIES: Dict[str, Type[RateLimitItem]] = {} class RateLimitItemMeta(type): def __new__( cls, name: str, parents: Tuple[type, ...], dct: Dict[str, Union[Granularity, List[str]]], ) -> RateLimitItemMeta: if "__slots__" not in dct: dct["__slots__"] = [] granularity = super().__new__(cls, name, parents, dct) if "GRANULARITY" in dct: GRANULARITIES[dct["GRANULARITY"][1]] = cast( Type[RateLimitItem], granularity ) return granularity # pylint: disable=no-member @total_ordering class RateLimitItem(metaclass=RateLimitItemMeta): """ defines a Rate limited resource which contains the characteristic namespace, amount and granularity multiples of the rate limiting window. :param amount: the rate limit amount :param multiples: multiple of the 'per' :attr:`GRANULARITY` (e.g. 'n' per 'm' seconds) :param namespace: category for the specific rate limit """ __slots__ = ["namespace", "amount", "multiples"] GRANULARITY: ClassVar[Granularity] """ A tuple describing the granularity of this limit as (number of seconds, name) """ def __init__( self, amount: int, multiples: Optional[int] = 1, namespace: str = "LIMITER" ): self.namespace = namespace self.amount = int(amount) self.multiples = int(multiples or 1) @classmethod def check_granularity_string(cls, granularity_string: str) -> bool: """ Checks if this instance matches a *granularity_string* of type ``n per hour``, ``n per minute`` etc, by comparing with :attr:`GRANULARITY` """ return granularity_string.lower() in cls.GRANULARITY.name def get_expiry(self) -> int: """ :return: the duration the limit is enforced for in seconds. """ return self.GRANULARITY.seconds * self.multiples def key_for(self, *identifiers: str) -> str: """ Constructs a key for the current limit and any additional identifiers provided. :param identifiers: a list of strings to append to the key :return: a string key identifying this resource with each identifier appended with a '/' delimiter. 
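For example::

    RateLimitItemPerMinute(10).key_for("a", "b")
    # 'LIMITER/a/b/10/1/minute'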
""" remainder = "/".join( [safe_string(k) for k in identifiers] + [ safe_string(self.amount), safe_string(self.multiples), self.GRANULARITY.name, ] ) return f"{self.namespace}/{remainder}" def __eq__(self, other: object) -> bool: if isinstance(other, RateLimitItem): return ( self.amount == other.amount and self.GRANULARITY == other.GRANULARITY and self.multiples == other.multiples ) return False def __repr__(self) -> str: return f"{self.amount} per {self.multiples} {self.GRANULARITY.name}" def __lt__(self, other: RateLimitItem) -> bool: return self.GRANULARITY.seconds < other.GRANULARITY.seconds def __hash__(self) -> int: return hash((self.namespace, self.amount, self.multiples, self.GRANULARITY)) class RateLimitItemPerYear(RateLimitItem): """ per year rate limited resource. """ GRANULARITY = TIME_TYPES["year"] """A year""" class RateLimitItemPerMonth(RateLimitItem): """ per month rate limited resource. """ GRANULARITY = TIME_TYPES["month"] """A month""" class RateLimitItemPerDay(RateLimitItem): """ per day rate limited resource. """ GRANULARITY = TIME_TYPES["day"] """A day""" class RateLimitItemPerHour(RateLimitItem): """ per hour rate limited resource. """ GRANULARITY = TIME_TYPES["hour"] """An hour""" class RateLimitItemPerMinute(RateLimitItem): """ per minute rate limited resource. """ GRANULARITY = TIME_TYPES["minute"] """A minute""" class RateLimitItemPerSecond(RateLimitItem): """ per second rate limited resource. """ GRANULARITY = TIME_TYPES["second"] """A second""" limits-3.12.0/limits/py.typed000066400000000000000000000000001462017272200160670ustar00rootroot00000000000000limits-3.12.0/limits/resources/000077500000000000000000000000001462017272200164145ustar00rootroot00000000000000limits-3.12.0/limits/resources/redis/000077500000000000000000000000001462017272200175225ustar00rootroot00000000000000limits-3.12.0/limits/resources/redis/lua_scripts/000077500000000000000000000000001462017272200220525ustar00rootroot00000000000000limits-3.12.0/limits/resources/redis/lua_scripts/acquire_moving_window.lua000066400000000000000000000007431462017272200271600ustar00rootroot00000000000000local timestamp = tonumber(ARGV[1]) local limit = tonumber(ARGV[2]) local expiry = tonumber(ARGV[3]) local amount = tonumber(ARGV[4]) if amount > limit then return false end local entry = redis.call('lindex', KEYS[1], limit - amount) if entry and tonumber(entry) >= timestamp - expiry then return false end for i = 1, amount do redis.call('lpush', KEYS[1], timestamp) end redis.call('ltrim', KEYS[1], 0, limit - 1) redis.call('expire', KEYS[1], expiry) return true limits-3.12.0/limits/resources/redis/lua_scripts/clear_keys.lua000066400000000000000000000002701462017272200246750ustar00rootroot00000000000000local keys = redis.call('keys', KEYS[1]) local res = 0 for i=1,#keys,5000 do res = res + redis.call( 'del', unpack(keys, i, math.min(i+4999, #keys)) ) end return res limits-3.12.0/limits/resources/redis/lua_scripts/incr_expire.lua000066400000000000000000000003031462017272200250600ustar00rootroot00000000000000local current local amount = tonumber(ARGV[2]) current = redis.call("incrby", KEYS[1], amount) if tonumber(current) == amount then redis.call("expire", KEYS[1], ARGV[1]) end return current limits-3.12.0/limits/resources/redis/lua_scripts/moving_window.lua000066400000000000000000000006161462017272200254460ustar00rootroot00000000000000local items = redis.call('lrange', KEYS[1], 0, tonumber(ARGV[2])) local expiry = tonumber(ARGV[1]) local a = 0 local oldest = nil for idx=1,#items do if tonumber(items[idx]) >= expiry 
then a = a + 1 local value = tonumber(items[idx]) if oldest == nil or value < oldest then oldest = value end else break end end return {oldest, a} limits-3.12.0/limits/storage/000077500000000000000000000000001462017272200160465ustar00rootroot00000000000000limits-3.12.0/limits/storage/__init__.py000066400000000000000000000050041462017272200201560ustar00rootroot00000000000000""" Implementations of storage backends to be used with :class:`limits.strategies.RateLimiter` strategies """ import urllib from typing import Union, cast import limits from ..errors import ConfigurationError from .base import MovingWindowSupport, Storage from .etcd import EtcdStorage from .memcached import MemcachedStorage from .memory import MemoryStorage from .mongodb import MongoDBStorage, MongoDBStorageBase from .redis import RedisStorage from .redis_cluster import RedisClusterStorage from .redis_sentinel import RedisSentinelStorage from .registry import SCHEMES StorageTypes = Union[Storage, "limits.aio.storage.Storage"] def storage_from_string( storage_string: str, **options: Union[float, str, bool] ) -> StorageTypes: """ Factory function to get an instance of the storage class based on the uri of the storage. In most cases using it should be sufficient instead of directly instantiating the storage classes. for example:: from limits.storage import storage_from_string memory = from_string("memory://") memcached = from_string("memcached://localhost:11211") redis = from_string("redis://localhost:6379") The same function can be used to construct the :ref:`storage:async storage` variants, for example:: from limits.storage import storage_from_string memory = storage_from_string("async+memory://") memcached = storage_from_string("async+memcached://localhost:11211") redis = storage_from_string("async+redis://localhost:6379") :param storage_string: a string of the form ``scheme://host:port``. More details about supported storage schemes can be found at :ref:`storage:storage scheme` :param options: all remaining keyword arguments are passed to the constructor matched by :paramref:`storage_string`. :raises ConfigurationError: when the :attr:`storage_string` cannot be mapped to a registered :class:`limits.storage.Storage` or :class:`limits.aio.storage.Storage` instance. 
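For example, keyword options are forwarded to the matched constructor::

    redis = storage_from_string("redis://localhost:6379", wrap_exceptions=True)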
""" scheme = urllib.parse.urlparse(storage_string).scheme if scheme not in SCHEMES: raise ConfigurationError("unknown storage scheme : %s" % storage_string) return cast(StorageTypes, SCHEMES[scheme](storage_string, **options)) __all__ = [ "storage_from_string", "Storage", "MovingWindowSupport", "EtcdStorage", "MongoDBStorageBase", "MemoryStorage", "MongoDBStorage", "RedisStorage", "RedisClusterStorage", "RedisSentinelStorage", "MemcachedStorage", ] limits-3.12.0/limits/storage/base.py000066400000000000000000000110231462017272200173270ustar00rootroot00000000000000from __future__ import annotations import functools import threading from abc import ABC, abstractmethod from typing import Any, cast from limits import errors from limits.storage.registry import StorageRegistry from limits.typing import ( Callable, List, Optional, P, R, Tuple, Type, Union, ) from limits.util import LazyDependency def _wrap_errors(storage: Storage, fn: Callable[P, R]) -> Callable[P, R]: @functools.wraps(fn) def inner(*args: P.args, **kwargs: P.kwargs) -> R: try: return fn(*args, **kwargs) except storage.base_exceptions as exc: if storage.wrap_exceptions: raise errors.StorageError(exc) from exc raise return inner class Storage(LazyDependency, metaclass=StorageRegistry): """ Base class to extend when implementing a storage backend. """ STORAGE_SCHEME: Optional[List[str]] """The storage schemes to register against this implementation""" def __new__(cls, *args: Any, **kwargs: Any) -> Storage: # type: ignore[misc] inst = super().__new__(cls) for method in { "incr", "get", "get_expiry", "check", "reset", "clear", }: setattr(inst, method, _wrap_errors(inst, getattr(inst, method))) return inst def __init__( self, uri: Optional[str] = None, wrap_exceptions: bool = False, **options: Union[float, str, bool], ): """ :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. """ self.lock = threading.RLock() super().__init__() self.wrap_exceptions = wrap_exceptions @property @abstractmethod def base_exceptions(self) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: raise NotImplementedError @abstractmethod def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. 
:param amount: the number to increment by """ raise NotImplementedError @abstractmethod def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ raise NotImplementedError @abstractmethod def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ raise NotImplementedError @abstractmethod def check(self) -> bool: """ check if storage is healthy """ raise NotImplementedError @abstractmethod def reset(self) -> Optional[int]: """ reset storage to clear limits """ raise NotImplementedError @abstractmethod def clear(self, key: str) -> None: """ resets the rate limit key :param key: the key to clear rate limits for """ raise NotImplementedError class MovingWindowSupport(ABC): """ Abstract base for storages that intend to support the moving window strategy """ def __new__(cls, *args: Any, **kwargs: Any) -> MovingWindowSupport: # type: ignore[misc] inst = super().__new__(cls) for method in { "acquire_entry", "get_moving_window", }: setattr( inst, method, _wrap_errors(cast(Storage, inst), getattr(inst, method)), ) return inst @abstractmethod def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ raise NotImplementedError @abstractmethod def get_moving_window(self, key: str, limit: int, expiry: int) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ raise NotImplementedError limits-3.12.0/limits/storage/etcd.py000066400000000000000000000106041462017272200173400ustar00rootroot00000000000000import time import urllib.parse from typing import TYPE_CHECKING, Optional, Tuple, Type, Union from limits.errors import ConcurrentUpdateError from limits.storage.base import Storage if TYPE_CHECKING: import etcd3 class EtcdStorage(Storage): """ Rate limit storage with etcd as backend. Depends on :pypi:`etcd3`. 
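A minimal construction sketch (host and port are placeholders)::

    from limits.storage import EtcdStorage

    storage = EtcdStorage("etcd://localhost:2379")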
""" STORAGE_SCHEME = ["etcd"] """The storage scheme for etcd""" DEPENDENCIES = ["etcd3"] PREFIX = "limits" MAX_RETRIES = 5 def __init__( self, uri: str, max_retries: int = MAX_RETRIES, **options: str, ) -> None: """ :param uri: etcd location of the form ``etcd://host:port``, :param max_retries: Maximum number of attempts to retry in the case of concurrent updates to a rate limit key :param options: all remaining keyword arguments are passed directly to the constructor of :class:`etcd3.Etcd3Client` :raise ConfigurationError: when :pypi:`etcd3` is not available """ parsed = urllib.parse.urlparse(uri) self.lib = self.dependencies["etcd3"].module self.storage: "etcd3.Etcd3Client" = self.lib.client( parsed.hostname, parsed.port, **options ) self.max_retries = max_retries @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return self.lib.Etcd3Exception # type: ignore[no-any-return] def prefixed_key(self, key: str) -> bytes: return f"{self.PREFIX}/{key}".encode() def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: retries = 0 etcd_key = self.prefixed_key(key) while retries < self.max_retries: now = time.time() lease = self.storage.lease(expiry) window_end = now + expiry create_attempt = self.storage.transaction( compare=[self.storage.transactions.create(etcd_key) == "0"], success=[ self.storage.transactions.put( etcd_key, f"{amount}:{window_end}".encode(), lease=lease.id, ) ], failure=[self.storage.transactions.get(etcd_key)], ) if create_attempt[0]: return amount else: cur, meta = create_attempt[1][0][0] cur_value, window_end = cur.split(b":") window_end = float(window_end) if window_end <= now: self.storage.revoke_lease(meta.lease_id) self.storage.delete(etcd_key) else: if elastic_expiry: self.storage.refresh_lease(meta.lease_id) window_end = now + expiry new = int(cur_value) + amount if self.storage.transaction( compare=[self.storage.transactions.value(etcd_key) == cur], success=[ self.storage.transactions.put( etcd_key, f"{new}:{window_end}".encode(), lease=meta.lease_id, ) ], failure=[], )[0]: return new retries += 1 raise ConcurrentUpdateError(key, retries) def get(self, key: str) -> int: value, meta = self.storage.get(self.prefixed_key(key)) if value: amount, expiry = value.split(b":") if float(expiry) > time.time(): return int(amount) return 0 def get_expiry(self, key: str) -> int: value, _ = self.storage.get(self.prefixed_key(key)) if value: window_end = float(value.split(b":")[1]) return int(window_end) return int(time.time()) def check(self) -> bool: try: self.storage.status() return True except: # noqa return False def reset(self) -> Optional[int]: return self.storage.delete_prefix(f"{self.PREFIX}/").deleted def clear(self, key: str) -> None: self.storage.delete(self.prefixed_key(key)) limits-3.12.0/limits/storage/memcached.py000066400000000000000000000147601462017272200203360ustar00rootroot00000000000000import inspect import threading import time import urllib.parse from types import ModuleType from typing import cast from limits.errors import ConfigurationError from limits.storage.base import Storage from limits.typing import ( Callable, List, MemcachedClientP, Optional, P, R, Tuple, Type, Union, ) from limits.util import get_dependency class MemcachedStorage(Storage): """ Rate limit storage with memcached as backend. Depends on :pypi:`pymemcache`. 
""" STORAGE_SCHEME = ["memcached"] """The storage scheme for memcached""" DEPENDENCIES = ["pymemcache"] def __init__( self, uri: str, wrap_exceptions: bool = False, **options: Union[str, Callable[[], MemcachedClientP]], ) -> None: """ :param uri: memcached location of the form ``memcached://host:port,host:port``, ``memcached:///var/tmp/path/to/sock`` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`pymemcache.client.base.PooledClient` or :class:`pymemcache.client.hash.HashClient` (if there are more than one hosts specified) :raise ConfigurationError: when :pypi:`pymemcache` is not available """ parsed = urllib.parse.urlparse(uri) self.hosts = [] for loc in parsed.netloc.strip().split(","): if not loc: continue host, port = loc.split(":") self.hosts.append((host, int(port))) else: # filesystem path to UDS if parsed.path and not parsed.netloc and not parsed.port: self.hosts = [parsed.path] # type: ignore self.dependency = self.dependencies["pymemcache"].module self.library = str(options.pop("library", "pymemcache.client")) self.cluster_library = str( options.pop("cluster_library", "pymemcache.client.hash") ) self.client_getter = cast( Callable[[ModuleType, List[Tuple[str, int]]], MemcachedClientP], options.pop("client_getter", self.get_client), ) self.options = options if not get_dependency(self.library): raise ConfigurationError( "memcached prerequisite not available." " please install %s" % self.library ) # pragma: no cover self.local_storage = threading.local() self.local_storage.storage = None super().__init__(uri, wrap_exceptions=wrap_exceptions) @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return self.dependency.MemcacheError # type: ignore[no-any-return] def get_client( self, module: ModuleType, hosts: List[Tuple[str, int]], **kwargs: str ) -> MemcachedClientP: """ returns a memcached client. 
:param module: the memcached module :param hosts: list of memcached hosts """ return cast( MemcachedClientP, ( module.HashClient(hosts, **kwargs) if len(hosts) > 1 else module.PooledClient(*hosts, **kwargs) ), ) def call_memcached_func( self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> R: if "noreply" in kwargs: argspec = inspect.getfullargspec(func) if not ("noreply" in argspec.args or argspec.varkw): kwargs.pop("noreply") return func(*args, **kwargs) @property def storage(self) -> MemcachedClientP: """ lazily creates a memcached client instance using a thread local """ if not (hasattr(self.local_storage, "storage") and self.local_storage.storage): dependency = get_dependency( self.cluster_library if len(self.hosts) > 1 else self.library )[0] if not dependency: raise ConfigurationError(f"Unable to import {self.cluster_library}") self.local_storage.storage = self.client_getter( dependency, self.hosts, **self.options ) return cast(MemcachedClientP, self.local_storage.storage) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ return int(self.storage.get(key) or 0) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.storage.delete(key) def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. :param amount: the number to increment by """ if not self.call_memcached_func( self.storage.add, key, amount, expiry, noreply=False ): value = self.storage.incr(key, amount) or amount if elastic_expiry: self.call_memcached_func(self.storage.touch, key, expiry) self.call_memcached_func( self.storage.set, key + "/expires", expiry + time.time(), expire=expiry, noreply=False, ) return value else: self.call_memcached_func( self.storage.set, key + "/expires", expiry + time.time(), expire=expiry, noreply=False, ) return amount def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ return int(float(self.storage.get(key + "/expires") or time.time())) def check(self) -> bool: """ Check if storage is healthy by calling the ``get`` command on the key ``limiter-check`` """ try: self.call_memcached_func(self.storage.get, "limiter-check") return True except: # noqa return False def reset(self) -> Optional[int]: raise NotImplementedError limits-3.12.0/limits/storage/memory.py000066400000000000000000000126711462017272200177370ustar00rootroot00000000000000import threading import time from collections import Counter import limits.typing from limits.storage.base import MovingWindowSupport, Storage from limits.typing import Dict, List, Optional, Tuple, Type, Union class LockableEntry(threading._RLock): # type: ignore def __init__(self, expiry: float) -> None: self.atime = time.time() self.expiry = self.atime + expiry super().__init__() class MemoryStorage(Storage, MovingWindowSupport): """ rate limit storage using :class:`collections.Counter` as an in memory storage for fixed and elastic window strategies, and a simple list to implement moving window strategy. 
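For example, combined with a fixed window strategy (mirroring the async
strategies module in this archive)::

    from limits.storage import MemoryStorage
    from limits.strategies import FixedWindowRateLimiter

    limiter = FixedWindowRateLimiter(MemoryStorage())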
""" STORAGE_SCHEME = ["memory"] def __init__( self, uri: Optional[str] = None, wrap_exceptions: bool = False, **_: str ): self.storage: limits.typing.Counter[str] = Counter() self.expirations: Dict[str, float] = {} self.events: Dict[str, List[LockableEntry]] = {} self.timer = threading.Timer(0.01, self.__expire_events) self.timer.start() super().__init__(uri, wrap_exceptions=wrap_exceptions, **_) @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return ValueError def __expire_events(self) -> None: for key in list(self.events.keys()): for event in list(self.events[key]): with event: if event.expiry <= time.time() and event in self.events[key]: self.events[key].remove(event) for key in list(self.expirations.keys()): if self.expirations[key] <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) def __schedule_expiry(self) -> None: if not self.timer.is_alive(): self.timer = threading.Timer(0.01, self.__expire_events) self.timer.start() def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param elastic_expiry: whether to keep extending the rate limit window every hit. :param amount: the number to increment by """ self.get(key) self.__schedule_expiry() self.storage[key] += amount if elastic_expiry or self.storage[key] == amount: self.expirations[key] = time.time() + expiry return self.storage.get(key, 0) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ if self.expirations.get(key, 0) <= time.time(): self.storage.pop(key, None) self.expirations.pop(key, None) return self.storage.get(key, 0) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.storage.pop(key, None) self.expirations.pop(key, None) self.events.pop(key, None) def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ if amount > limit: return False self.events.setdefault(key, []) self.__schedule_expiry() timestamp = time.time() try: entry = self.events[key][limit - amount] except IndexError: entry = None if entry and entry.atime >= timestamp - expiry: return False else: self.events[key][:0] = [LockableEntry(expiry) for _ in range(amount)] return True def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ return int(self.expirations.get(key, time.time())) def get_num_acquired(self, key: str, expiry: int) -> int: """ returns the number of entries already acquired :param key: rate limit key to acquire an entry in :param expiry: expiry of the entry """ timestamp = time.time() return ( len([k for k in self.events[key] if k.atime >= timestamp - expiry]) if self.events.get(key) else 0 ) def get_moving_window(self, key: str, limit: int, expiry: int) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() acquired = self.get_num_acquired(key, expiry) for item in self.events.get(key, [])[::-1]: if item.atime >= timestamp - expiry: return int(item.atime), acquired return 
int(timestamp), acquired def check(self) -> bool: """ check if storage is healthy """ return True def reset(self) -> Optional[int]: num_items = max(len(self.storage), len(self.events)) self.storage.clear() self.expirations.clear() self.events.clear() return num_items limits-3.12.0/limits/storage/mongodb.py000066400000000000000000000217751462017272200200610ustar00rootroot00000000000000from __future__ import annotations import calendar import datetime import time from abc import ABC, abstractmethod from typing import Any, cast from deprecated.sphinx import versionadded from limits.typing import ( Dict, MongoClient, MongoCollection, MongoDatabase, Optional, Tuple, Type, Union, ) from ..util import get_dependency from .base import MovingWindowSupport, Storage class MongoDBStorageBase(Storage, MovingWindowSupport, ABC): """ Rate limit storage with MongoDB as backend. Depends on :pypi:`pymongo`. """ DEPENDENCIES = ["pymongo"] def __init__( self, uri: str, database_name: str = "limits", wrap_exceptions: bool = False, **options: Union[int, str, bool], ) -> None: """ :param uri: uri of the form ``mongodb://[user:password]@host:port?...``, This uri is passed directly to :class:`~pymongo.mongo_client.MongoClient` :param database_name: The database to use for storing the rate limit collections. :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed to the constructor of :class:`~pymongo.mongo_client.MongoClient` :raise ConfigurationError: when the :pypi:`pymongo` library is not available """ super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self._database_name = database_name self.lib = self.dependencies["pymongo"].module self.lib_errors, _ = get_dependency("pymongo.errors") self._storage_uri = uri self._storage_options = options self._storage: Optional[MongoClient] = None @property def storage(self) -> MongoClient: if self._storage is None: self._storage = self._init_mongo_client( self._storage_uri, **self._storage_options ) self.__initialize_database() return self._storage @property def _database(self) -> MongoDatabase: return self.storage[self._database_name] @property def counters(self) -> MongoCollection: return self._database["counters"] @property def windows(self) -> MongoCollection: return self._database["windows"] @abstractmethod def _init_mongo_client( self, uri: Optional[str], **options: Union[int, str, bool] ) -> MongoClient: raise NotImplementedError() @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return self.lib_errors.PyMongoError # type: ignore def __initialize_database(self) -> None: self.counters.create_index("expireAt", expireAfterSeconds=0) self.windows.create_index("expireAt", expireAfterSeconds=0) def reset(self) -> Optional[int]: """ Delete all rate limit keys in the rate limit collections (counters, windows) """ num_keys = self.counters.count_documents({}) + self.windows.count_documents({}) self.counters.drop() self.windows.drop() return int(num_keys) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ self.counters.find_one_and_delete({"_id": key}) self.windows.find_one_and_delete({"_id": key}) def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ counter = self.counters.find_one({"_id": key}) expiry = ( counter["expireAt"] if counter else datetime.datetime.now(datetime.timezone.utc) ) return 
calendar.timegm(expiry.timetuple()) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ counter = self.counters.find_one( { "_id": key, "expireAt": {"$gte": datetime.datetime.now(datetime.timezone.utc)}, }, projection=["count"], ) return counter and counter["count"] or 0 def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ expiration = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( seconds=expiry ) return int( self.counters.find_one_and_update( {"_id": key}, [ { "$set": { "count": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": amount, "else": {"$add": ["$count", amount]}, } }, "expireAt": { "$cond": { "if": {"$lt": ["$expireAt", "$$NOW"]}, "then": expiration, "else": ( expiration if elastic_expiry else "$expireAt" ), } }, } }, ], upsert=True, projection=["count"], return_document=self.lib.ReturnDocument.AFTER, )["count"] ) def check(self) -> bool: """ Check if storage is healthy by calling :meth:`pymongo.mongo_client.MongoClient.server_info` """ try: self.storage.server_info() return True except: # noqa: E722 return False def get_moving_window(self, key: str, limit: int, expiry: int) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ timestamp = time.time() result = list( self.windows.aggregate( [ {"$match": {"_id": key}}, { "$project": { "entries": { "$filter": { "input": "$entries", "as": "entry", "cond": {"$gte": ["$$entry", timestamp - expiry]}, } } } }, {"$unwind": "$entries"}, { "$group": { "_id": "$_id", "min": {"$min": "$entries"}, "count": {"$sum": 1}, } }, ] ) ) if result: return int(result[0]["min"]), result[0]["count"] return int(timestamp), 0 def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number of entries to acquire """ if amount > limit: return False timestamp = time.time() try: updates: Dict[str, Any] = { # type: ignore "$push": {"entries": {"$each": [], "$position": 0, "$slice": limit}} } updates["$set"] = { "expireAt": ( datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=expiry) ) } updates["$push"]["entries"]["$each"] = [timestamp] * amount self.windows.update_one( { "_id": key, "entries.%d" % (limit - amount): { "$not": {"$gte": timestamp - expiry} }, }, updates, upsert=True, ) return True except self.lib.errors.DuplicateKeyError: return False @versionadded(version="2.1") class MongoDBStorage(MongoDBStorageBase): STORAGE_SCHEME = ["mongodb", "mongodb+srv"] def _init_mongo_client( self, uri: Optional[str], **options: Union[int, str, bool] ) -> MongoClient: return cast(MongoClient, self.lib.MongoClient(uri, **options)) limits-3.12.0/limits/storage/redis.py000066400000000000000000000204041462017272200175260ustar00rootroot00000000000000from __future__ import annotations import time from typing import TYPE_CHECKING from packaging.version import Version from limits.typing import Optional, RedisClient, ScriptP, Tuple, Type, Union from ..util import get_package_data from .base import MovingWindowSupport, Storage if 
TYPE_CHECKING: import redis class RedisInteractor: RES_DIR = "resources/redis/lua_scripts" SCRIPT_MOVING_WINDOW = get_package_data(f"{RES_DIR}/moving_window.lua") SCRIPT_ACQUIRE_MOVING_WINDOW = get_package_data( f"{RES_DIR}/acquire_moving_window.lua" ) SCRIPT_CLEAR_KEYS = get_package_data(f"{RES_DIR}/clear_keys.lua") SCRIPT_INCR_EXPIRE = get_package_data(f"{RES_DIR}/incr_expire.lua") lua_moving_window: ScriptP[Tuple[int, int]] lua_acquire_window: ScriptP[bool] PREFIX = "LIMITS" def prefixed_key(self, key: str) -> str: return f"{self.PREFIX}:{key}" def get_moving_window(self, key: str, limit: int, expiry: int) -> Tuple[int, int]: """ returns the starting point and the number of entries in the moving window :param key: rate limit key :param expiry: expiry of entry :return: (start of window, number of acquired entries) """ key = self.prefixed_key(key) timestamp = time.time() window = self.lua_moving_window([key], [int(timestamp - expiry), limit]) return window or (int(timestamp), 0) def _incr( self, key: str, expiry: int, connection: RedisClient, elastic_expiry: bool = False, amount: int = 1, ) -> int: """ increments the counter for a given rate limit key :param connection: Redis connection :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ key = self.prefixed_key(key) value = connection.incrby(key, amount) if elastic_expiry or value == amount: connection.expire(key, expiry) return value def _get(self, key: str, connection: RedisClient) -> int: """ :param connection: Redis connection :param key: the key to get the counter value for """ key = self.prefixed_key(key) return int(connection.get(key) or 0) def _clear(self, key: str, connection: RedisClient) -> None: """ :param key: the key to clear rate limits for :param connection: Redis connection """ key = self.prefixed_key(key) connection.delete(key) def _acquire_entry( self, key: str, limit: int, expiry: int, connection: RedisClient, amount: int = 1, ) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param connection: Redis connection :param amount: the number of entries to acquire """ key = self.prefixed_key(key) timestamp = time.time() acquired = self.lua_acquire_window([key], [timestamp, limit, expiry, amount]) return bool(acquired) def _get_expiry(self, key: str, connection: RedisClient) -> int: """ :param key: the key to get the expiry for :param connection: Redis connection """ key = self.prefixed_key(key) return int(max(connection.ttl(key), 0) + time.time()) def _check(self, connection: RedisClient) -> bool: """ :param connection: Redis connection check if storage is healthy """ try: return connection.ping() except: # noqa return False class RedisStorage(RedisInteractor, Storage, MovingWindowSupport): """ Rate limit storage with redis as backend. Depends on :pypi:`redis`. """ STORAGE_SCHEME = ["redis", "rediss", "redis+unix"] """The storage scheme for redis""" DEPENDENCIES = {"redis": Version("3.0")} def __init__( self, uri: str, connection_pool: Optional[redis.connection.ConnectionPool] = None, wrap_exceptions: bool = False, **options: Union[float, str, bool], ) -> None: """ :param uri: uri of the form ``redis://[:password]@host:port``, ``redis://[:password]@host:port/db``, ``rediss://[:password]@host:port``, ``redis+unix:///path/to/sock`` etc. 
This uri is passed directly to :func:`redis.from_url` except for the case of ``redis+unix://`` where it is replaced with ``unix://``. :param connection_pool: if provided, the redis client is initialized with the connection pool and any other params passed as :paramref:`options` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`redis.Redis` :raise ConfigurationError: when the :pypi:`redis` library is not available """ super().__init__(uri, wrap_exceptions=wrap_exceptions, **options) self.dependency = self.dependencies["redis"].module uri = uri.replace("redis+unix", "unix") if not connection_pool: self.storage = self.dependency.from_url(uri, **options) else: self.storage = self.dependency.Redis( connection_pool=connection_pool, **options ) self.initialize_storage(uri) @property def base_exceptions( self, ) -> Union[Type[Exception], Tuple[Type[Exception], ...]]: # pragma: no cover return self.dependency.RedisError # type: ignore[no-any-return] def initialize_storage(self, _uri: str) -> None: self.lua_moving_window = self.storage.register_script(self.SCRIPT_MOVING_WINDOW) self.lua_acquire_window = self.storage.register_script( self.SCRIPT_ACQUIRE_MOVING_WINDOW ) self.lua_clear_keys = self.storage.register_script(self.SCRIPT_CLEAR_KEYS) self.lua_incr_expire = self.storage.register_script( RedisStorage.SCRIPT_INCR_EXPIRE ) def incr( self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1 ) -> int: """ increments the counter for a given rate limit key :param key: the key to increment :param expiry: amount in seconds for the key to expire in :param amount: the number to increment by """ if elastic_expiry: return super()._incr(key, expiry, self.storage, elastic_expiry, amount) else: key = self.prefixed_key(key) return int(self.lua_incr_expire([key], [expiry, amount])) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ return super()._get(key, self.storage) def clear(self, key: str) -> None: """ :param key: the key to clear rate limits for """ return super()._clear(key, self.storage) def acquire_entry(self, key: str, limit: int, expiry: int, amount: int = 1) -> bool: """ :param key: rate limit key to acquire an entry in :param limit: amount of entries allowed :param expiry: expiry of the entry :param amount: the number to increment by """ return super()._acquire_entry(key, limit, expiry, self.storage, amount) def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ return super()._get_expiry(key, self.storage) def check(self) -> bool: """ check if storage is healthy """ return super()._check(self.storage) def reset(self) -> Optional[int]: """ This function calls a Lua Script to delete keys prefixed with ``self.PREFIX`` in blocks of 5000. .. warning:: This operation was designed to be fast, but was not tested on a large production based system. Be careful with its usage as it could be slow on very large data sets. 
""" prefix = self.prefixed_key("*") return int(self.lua_clear_keys([prefix])) limits-3.12.0/limits/storage/redis_cluster.py000066400000000000000000000124041462017272200212700ustar00rootroot00000000000000import urllib import warnings from typing import cast from deprecated.sphinx import versionchanged from packaging.version import Version from limits.errors import ConfigurationError from limits.storage.redis import RedisStorage from limits.typing import Dict, List, Optional, Tuple, Union @versionchanged( version="2.5.0", reason=""" Cluster support was provided by the :pypi:`redis-py-cluster` library which has been absorbed into the official :pypi:`redis` client. By default the :class:`redis.cluster.RedisCluster` client will be used however if the version of the package is lower than ``4.2.0`` the implementation will fallback to trying to use :class:`rediscluster.RedisCluster`. """, ) class RedisClusterStorage(RedisStorage): """ Rate limit storage with redis cluster as backend Depends on :pypi:`redis`. """ STORAGE_SCHEME = ["redis+cluster"] """The storage scheme for redis cluster""" DEFAULT_OPTIONS: Dict[str, Union[float, str, bool]] = { "max_connections": 1000, } "Default options passed to the :class:`~redis.cluster.RedisCluster`" DEPENDENCIES = { "redis": Version("4.2.0"), "rediscluster": Version("2.0.0"), # Deprecated since 2.6.0 } def __init__(self, uri: str, **options: Union[float, str, bool]) -> None: """ :param uri: url of the form ``redis+cluster://[:password]@host:port,host:port`` :param options: all remaining keyword arguments are passed directly to the constructor of :class:`redis.cluster.RedisCluster` :raise ConfigurationError: when the :pypi:`redis` library is not available or if the redis cluster cannot be reached. """ parsed = urllib.parse.urlparse(uri) parsed_auth: Dict[str, Union[float, str, bool]] = {} if parsed.username: parsed_auth["username"] = parsed.username if parsed.password: parsed_auth["password"] = parsed.password sep = parsed.netloc.find("@") + 1 cluster_hosts = [] for loc in parsed.netloc[sep:].split(","): host, port = loc.split(":") cluster_hosts.append((host, int(port))) self.storage = None self.using_redis_py = False self.__pick_storage( cluster_hosts, **{**self.DEFAULT_OPTIONS, **parsed_auth, **options} ) assert self.storage self.initialize_storage(uri) super(RedisStorage, self).__init__(uri, **options) def __pick_storage( self, cluster_hosts: List[Tuple[str, int]], **options: Union[float, str, bool] ) -> None: try: redis_py = self.dependencies["redis"].module startup_nodes = [redis_py.cluster.ClusterNode(*c) for c in cluster_hosts] self.storage = redis_py.cluster.RedisCluster( startup_nodes=startup_nodes, **options ) self.using_redis_py = True return except ConfigurationError: # pragma: no cover self.__use_legacy_cluster_implementation(cluster_hosts, **options) if not self.storage: raise ConfigurationError( ( "Unable to find an implementation for redis cluster" " Cluster support requires either redis-py>=4.2 or" " redis-py-cluster" ) ) def __use_legacy_cluster_implementation( self, cluster_hosts: List[Tuple[str, int]], **options: Union[float, str, bool] ) -> None: # pragma: no cover redis_cluster = self.dependencies["rediscluster"].module warnings.warn( ( "Using redis-py-cluster is deprecated as the library has been" " absorbed by redis-py (>=4.2). The support will be eventually " " removed from the limits library and is no longer tested " " against since version: 2.6. 
To get rid of this warning, " " uninstall redis-py-cluster and ensure redis-py>=4.2.0 is installed" ) ) self.storage = redis_cluster.RedisCluster( startup_nodes=[{"host": c[0], "port": c[1]} for c in cluster_hosts], **options, ) def reset(self) -> Optional[int]: """ Redis Clusters are sharded and deleting across shards can't be done atomically. Because of this, this reset loops over all keys that are prefixed with ``self.PREFIX`` and calls delete on them, one at a time. .. warning:: This operation was not tested with extremely large data sets. On a large production based system, care should be taken with its usage as it could be slow on very large data sets""" prefix = self.prefixed_key("*") if self.using_redis_py: count = 0 for primary in self.storage.get_primaries(): node = self.storage.get_redis_connection(primary) keys = node.keys(prefix) count += sum([node.delete(k.decode("utf-8")) for k in keys]) return count else: # pragma: no cover keys = self.storage.keys(prefix) return cast( int, sum([self.storage.delete(k.decode("utf-8")) for k in keys]) ) limits-3.12.0/limits/storage/redis_sentinel.py000066400000000000000000000074431462017272200214370ustar00rootroot00000000000000import urllib.parse from typing import TYPE_CHECKING from packaging.version import Version from limits.errors import ConfigurationError from limits.storage.redis import RedisStorage from limits.typing import Dict, Optional, Union if TYPE_CHECKING: import redis.sentinel class RedisSentinelStorage(RedisStorage): """ Rate limit storage with redis sentinel as backend Depends on :pypi:`redis` package """ STORAGE_SCHEME = ["redis+sentinel"] """The storage scheme for redis accessed via a redis sentinel installation""" DEPENDENCIES = {"redis.sentinel": Version("3.0")} def __init__( self, uri: str, service_name: Optional[str] = None, use_replicas: bool = True, sentinel_kwargs: Optional[Dict[str, Union[float, str, bool]]] = None, wrap_exceptions: bool = False, **options: Union[float, str, bool], ) -> None: """ :param uri: url of the form ``redis+sentinel://host:port,host:port/service_name`` :param service_name: sentinel service name (if not provided in :attr:`uri`) :param use_replicas: Whether to use replicas for read only operations :param sentinel_kwargs: kwargs to pass as :attr:`sentinel_kwargs` to :class:`redis.sentinel.Sentinel` :param wrap_exceptions: Whether to wrap storage exceptions in :exc:`limits.errors.StorageError` before raising it. :param options: all remaining keyword arguments are passed directly to the constructor of :class:`redis.sentinel.Sentinel` :raise ConfigurationError: when the redis library is not available or if the redis master host cannot be pinged. 
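
        A minimal construction sketch (illustrative values, not taken from
        the project documentation)::

            RedisSentinelStorage(
                "redis+sentinel://localhost:26379/mymaster",
                use_replicas=False,
            )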
""" super(RedisStorage, self).__init__( uri, wrap_exceptions=wrap_exceptions, **options ) parsed = urllib.parse.urlparse(uri) sentinel_configuration = [] sentinel_options = sentinel_kwargs.copy() if sentinel_kwargs else {} parsed_auth: Dict[str, Union[float, str, bool]] = {} if parsed.username: parsed_auth["username"] = parsed.username if parsed.password: parsed_auth["password"] = parsed.password sep = parsed.netloc.find("@") + 1 for loc in parsed.netloc[sep:].split(","): host, port = loc.split(":") sentinel_configuration.append((host, int(port))) self.service_name = ( parsed.path.replace("/", "") if parsed.path else service_name ) if self.service_name is None: raise ConfigurationError("'service_name' not provided") sentinel_dep = self.dependencies["redis.sentinel"].module self.sentinel: "redis.sentinel.Sentinel" = sentinel_dep.Sentinel( sentinel_configuration, sentinel_kwargs={**parsed_auth, **sentinel_options}, **{**parsed_auth, **options}, ) self.storage = self.sentinel.master_for(self.service_name) self.storage_slave = self.sentinel.slave_for(self.service_name) self.use_replicas = use_replicas self.initialize_storage(uri) def get(self, key: str) -> int: """ :param key: the key to get the counter value for """ return super()._get( key, self.storage_slave if self.use_replicas else self.storage ) def get_expiry(self, key: str) -> int: """ :param key: the key to get the expiry for """ return super()._get_expiry( key, self.storage_slave if self.use_replicas else self.storage ) def check(self) -> bool: """ Check if storage is healthy by calling :class:`aredis.StrictRedis.ping` on the slave. """ return super()._check(self.storage_slave if self.use_replicas else self.storage) limits-3.12.0/limits/storage/registry.py000066400000000000000000000013041462017272200202660ustar00rootroot00000000000000from __future__ import annotations from abc import ABCMeta from limits.typing import Dict, List, Tuple, Union SCHEMES: Dict[str, StorageRegistry] = {} class StorageRegistry(ABCMeta): def __new__( mcs, name: str, bases: Tuple[type, ...], dct: Dict[str, Union[str, List[str]]] ) -> StorageRegistry: storage_scheme = dct.get("STORAGE_SCHEME", None) cls = super().__new__(mcs, name, bases, dct) if storage_scheme: if isinstance(storage_scheme, str): # noqa schemes = [storage_scheme] else: schemes = storage_scheme for scheme in schemes: SCHEMES[scheme] = cls return cls limits-3.12.0/limits/strategies.py000066400000000000000000000150421462017272200171300ustar00rootroot00000000000000""" Rate limiting strategies """ from abc import ABCMeta, abstractmethod from typing import Dict, Type, Union, cast from .limits import RateLimitItem from .storage import MovingWindowSupport, Storage, StorageTypes from .util import WindowStats class RateLimiter(metaclass=ABCMeta): def __init__(self, storage: StorageTypes): assert isinstance(storage, Storage) self.storage: Storage = storage @abstractmethod def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The cost of this hit, default 1 """ raise NotImplementedError @abstractmethod def test(self, item: RateLimitItem, *identifiers: str) -> bool: """ Check the rate limit without consuming from it. 
:param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        """
        raise NotImplementedError

    @abstractmethod
    def get_window_stats(self, item: RateLimitItem, *identifiers: str) -> WindowStats:
        """
        Query the reset time and remaining amount for the limit

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :return: (reset time, remaining)
        """
        raise NotImplementedError

    def clear(self, item: RateLimitItem, *identifiers: str) -> None:
        return self.storage.clear(item.key_for(*identifiers))


class MovingWindowRateLimiter(RateLimiter):
    """
    Reference: :ref:`strategies:moving window`
    """

    def __init__(self, storage: StorageTypes):
        if not (
            hasattr(storage, "acquire_entry") or hasattr(storage, "get_moving_window")
        ):
            raise NotImplementedError(
                "MovingWindowRateLimiting is not implemented for storage "
                "of type %s" % storage.__class__
            )
        super().__init__(storage)

    def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool:
        """
        Consume the rate limit

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        :param cost: The cost of this hit, default 1
        :return: ``True`` if the hit is allowed, ``False`` otherwise
        """
        return cast(MovingWindowSupport, self.storage).acquire_entry(
            item.key_for(*identifiers), item.amount, item.get_expiry(), amount=cost
        )

    def test(self, item: RateLimitItem, *identifiers: str) -> bool:
        """
        Check if the rate limit can be consumed

        :param item: The rate limit item
        :param identifiers: variable list of strings to uniquely identify this
         instance of the limit
        """
        return (
            cast(MovingWindowSupport, self.storage).get_moving_window(
                item.key_for(*identifiers),
                item.amount,
                item.get_expiry(),
            )[1]
            < item.amount
        )

    def get_window_stats(self, item: RateLimitItem, *identifiers: str) -> WindowStats:
        """
        Query the reset time and remaining amount for the limit.
:param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :return: tuple (reset time, remaining) """ window_start, window_items = cast( MovingWindowSupport, self.storage ).get_moving_window(item.key_for(*identifiers), item.amount, item.get_expiry()) reset = window_start + item.get_expiry() return WindowStats(reset, item.amount - window_items) class FixedWindowRateLimiter(RateLimiter): """ Reference: :ref:`strategies:fixed window` """ def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The cost of this hit, default 1 """ return ( self.storage.incr( item.key_for(*identifiers), item.get_expiry(), elastic_expiry=False, amount=cost, ) <= item.amount ) def test(self, item: RateLimitItem, *identifiers: str) -> bool: """ Check if the rate limit can be consumed :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit """ return self.storage.get(item.key_for(*identifiers)) < item.amount def get_window_stats(self, item: RateLimitItem, *identifiers: str) -> WindowStats: """ Query the reset time and remaining amount for the limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :return: (reset time, remaining) """ remaining = max(0, item.amount - self.storage.get(item.key_for(*identifiers))) reset = self.storage.get_expiry(item.key_for(*identifiers)) return WindowStats(reset, remaining) class FixedWindowElasticExpiryRateLimiter(FixedWindowRateLimiter): """ Reference: :ref:`strategies:fixed window with elastic expiry` """ def hit(self, item: RateLimitItem, *identifiers: str, cost: int = 1) -> bool: """ Consume the rate limit :param item: The rate limit item :param identifiers: variable list of strings to uniquely identify this instance of the limit :param cost: The cost of this hit, default 1 """ return ( self.storage.incr( item.key_for(*identifiers), item.get_expiry(), elastic_expiry=True, amount=cost, ) <= item.amount ) KnownStrategy = Union[ Type[FixedWindowRateLimiter], Type[FixedWindowElasticExpiryRateLimiter], Type[MovingWindowRateLimiter], ] STRATEGIES: Dict[str, KnownStrategy] = { "fixed-window": FixedWindowRateLimiter, "fixed-window-elastic-expiry": FixedWindowElasticExpiryRateLimiter, "moving-window": MovingWindowRateLimiter, } limits-3.12.0/limits/typing.py000066400000000000000000000063271462017272200162760ustar00rootroot00000000000000from typing import ( TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, NamedTuple, Optional, Tuple, Type, TypeVar, Union, ) from typing_extensions import ClassVar, Counter, ParamSpec, Protocol, TypeAlias Serializable = Union[int, str, float] R = TypeVar("R") R_co = TypeVar("R_co", covariant=True) P = ParamSpec("P") if TYPE_CHECKING: import coredis import coredis.commands.script import pymongo import redis class ItemP(Protocol): value: bytes flags: Optional[int] cas: Optional[int] class EmcacheClientP(Protocol): async def add( self, key: bytes, value: bytes, *, flags: int = 0, exptime: int = 0, noreply: bool = False, ) -> None: ... async def get(self, key: bytes, return_flags: bool = False) -> Optional[ItemP]: ... async def gets(self, key: bytes, return_flags: bool = False) -> Optional[ItemP]: ... 
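    # NOTE: the stubs above and below deliberately mirror only the subset of an
    # emcache-style async memcached client that the async storage backend
    # relies on (CAS reads via ``gets``, atomic ``increment``, expiry updates
    # via ``touch``); this Protocol is a structural typing aid, not the full
    # client API.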
async def increment( self, key: bytes, value: int, *, noreply: bool = False ) -> Optional[int]: ... async def delete(self, key: bytes, *, noreply: bool = False) -> None: ... async def set( self, key: bytes, value: bytes, *, flags: int = 0, exptime: int = 0, noreply: bool = False, ) -> None: ... async def touch( self, key: bytes, exptime: int, *, noreply: bool = False ) -> None: ... class MemcachedClientP(Protocol): def add( self, key: str, value: Serializable, expire: Optional[int] = 0, noreply: Optional[bool] = None, flags: Optional[int] = None, ) -> bool: ... def get(self, key: str, default: Optional[str] = None) -> bytes: ... def incr(self, key: str, value: int, noreply: Optional[bool] = False) -> int: ... def delete(self, key: str, noreply: Optional[bool] = None) -> Optional[bool]: ... def set( self, key: str, value: Serializable, expire: int = 0, noreply: Optional[bool] = None, flags: Optional[int] = None, ) -> bool: ... def touch( self, key: str, expire: Optional[int] = 0, noreply: Optional[bool] = None ) -> bool: ... AsyncRedisClient = Union["coredis.Redis[bytes]", "coredis.RedisCluster[bytes]"] RedisClient = Union["redis.Redis[bytes]", "redis.cluster.RedisCluster[bytes]"] class ScriptP(Protocol[R_co]): def __call__(self, keys: List[Serializable], args: List[Serializable]) -> R_co: ... MongoClient: TypeAlias = "pymongo.MongoClient[Dict[str, Any]]" # type:ignore[misc] MongoDatabase: TypeAlias = "pymongo.database.Database[Dict[str, Any]]" # type:ignore[misc] MongoCollection: TypeAlias = "pymongo.collection.Collection[Dict[str, Any]]" # type:ignore[misc] __all__ = [ "AsyncRedisClient", "Awaitable", "Callable", "ClassVar", "Counter", "Dict", "EmcacheClientP", "ItemP", "List", "MemcachedClientP", "MongoClient", "MongoCollection", "MongoDatabase", "NamedTuple", "Optional", "P", "ParamSpec", "Protocol", "ScriptP", "Serializable", "TypeVar", "R", "R_co", "RedisClient", "Tuple", "Type", "TypeVar", "Union", ] limits-3.12.0/limits/util.py000066400000000000000000000131571462017272200157400ustar00rootroot00000000000000""" """ import dataclasses import re import sys from collections import UserDict from types import ModuleType from typing import TYPE_CHECKING, cast import importlib_resources from packaging.version import Version from limits.typing import Dict, List, NamedTuple, Optional, Tuple, Type, Union from .errors import ConfigurationError from .limits import GRANULARITIES, RateLimitItem SEPARATORS = re.compile(r"[,;|]{1}") SINGLE_EXPR = re.compile( r""" \s*([0-9]+) \s*(/|\s*per\s*) \s*([0-9]+) *\s*(hour|minute|second|day|month|year)s?\s*""", re.IGNORECASE | re.VERBOSE, ) EXPR = re.compile( r"^{SINGLE}(:?{SEPARATORS}{SINGLE})*$".format( SINGLE=SINGLE_EXPR.pattern, SEPARATORS=SEPARATORS.pattern ), re.IGNORECASE | re.VERBOSE, ) class WindowStats(NamedTuple): """ Tuple to describe a rate limited window """ #: Time as seconds since the Epoch when this window will be reset reset_time: int #: Quantity remaining in this window remaining: int @dataclasses.dataclass class Dependency: name: str version_required: Optional[Version] version_found: Optional[Version] module: ModuleType if TYPE_CHECKING: _UserDict = UserDict[str, Dependency] else: _UserDict = UserDict class DependencyDict(_UserDict): Missing = Dependency("Missing", None, None, ModuleType("Missing")) def __getitem__(self, key: str) -> Dependency: dependency = super().__getitem__(key) if dependency == DependencyDict.Missing: raise ConfigurationError(f"{key} prerequisite not available") elif dependency.version_required and ( not 
dependency.version_found or dependency.version_found < dependency.version_required ): raise ConfigurationError( f"The minimum version of {dependency.version_required}" f" of {dependency.name} could not be found" ) return dependency class LazyDependency: """ Simple utility that provides an :attr:`dependency` to the child class to fetch any dependencies without having to import them explicitly. """ DEPENDENCIES: Union[Dict[str, Optional[Version]], List[str]] = [] """ The python modules this class has a dependency on. Used to lazily populate the :attr:`dependencies` """ def __init__(self) -> None: self._dependencies: DependencyDict = DependencyDict() @property def dependencies(self) -> DependencyDict: """ Cached mapping of the modules this storage depends on. This is done so that the module is only imported lazily when the storage is instantiated. :meta private: """ if not getattr(self, "_dependencies", None): dependencies = DependencyDict() mapping: Dict[str, Optional[Version]] if isinstance(self.DEPENDENCIES, list): mapping = {dependency: None for dependency in self.DEPENDENCIES} else: mapping = self.DEPENDENCIES for name, minimum_version in mapping.items(): dependency, version = get_dependency(name) if not dependency: dependencies[name] = DependencyDict.Missing else: dependencies[name] = Dependency( name, minimum_version, version, dependency ) self._dependencies = dependencies return self._dependencies def get_dependency(module_path: str) -> Tuple[Optional[ModuleType], Optional[Version]]: """ safe function to import a module at runtime """ try: if module_path not in sys.modules: __import__(module_path) root = module_path.split(".")[0] version = getattr(sys.modules[root], "__version__", "0.0.0") return sys.modules[module_path], Version(version) except ImportError: # pragma: no cover return None, None def get_package_data(path: str) -> bytes: return cast(bytes, importlib_resources.files("limits").joinpath(path).read_bytes()) def parse_many(limit_string: str) -> List[RateLimitItem]: """ parses rate limits in string notation containing multiple rate limits (e.g. ``1/second; 5/minute``) :param limit_string: rate limit string using :ref:`ratelimit-string` :raise ValueError: if the string notation is invalid. """ if not (isinstance(limit_string, str) and EXPR.match(limit_string)): raise ValueError("couldn't parse rate limit string '%s'" % limit_string) limits = [] for limit in SEPARATORS.split(limit_string): match = SINGLE_EXPR.match(limit) if match: amount, _, multiples, granularity_string = match.groups() granularity = granularity_from_string(granularity_string) limits.append( granularity(int(amount), multiples and int(multiples) or None) ) return limits def parse(limit_string: str) -> RateLimitItem: """ parses a single rate limit in string notation (e.g. ``1/second`` or ``1 per second``) :param limit_string: rate limit string using :ref:`ratelimit-string` :raise ValueError: if the string notation is invalid. 
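
    For example (illustrative)::

        parse("1/second")
        parse("10 per hour")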
""" return list(parse_many(limit_string))[0] def granularity_from_string(granularity_string: str) -> Type[RateLimitItem]: """ :param granularity_string: :raise ValueError: """ for granularity in GRANULARITIES.values(): if granularity.check_granularity_string(granularity_string): return granularity raise ValueError("no granularity matched for %s" % granularity_string) limits-3.12.0/limits/version.py000066400000000000000000000000571462017272200164430ustar00rootroot00000000000000""" empty file to be updated by versioneer """ limits-3.12.0/push-release.sh000077500000000000000000000002661462017272200160410ustar00rootroot00000000000000#!/bin/bash cur=$(git rev-parse --abbrev-ref HEAD) git checkout master git push origin master --tags git checkout stable git merge master git push origin stable git checkout $cur limits-3.12.0/pyproject.toml000066400000000000000000000002501462017272200160120ustar00rootroot00000000000000[tool.ruff] line-length = 88 indent-width = 4 [tool.ruff.format] quote-style = "double" indent-style = "space" skip-magic-trailing-comma = false line-ending = "auto" limits-3.12.0/pytest.ini000066400000000000000000000012061462017272200151310ustar00rootroot00000000000000[pytest] asyncio_mode = auto norecursedirs = build *.egg markers = unit: mark a test as a unit test. integration: mark a test as an integration test. redis: redis tests redis_sentinel: redis sentinel tests redis_cluster: redis cluster tests mongodb: mongodb tests memcached: memcached tests etcd: etcd tests addopts = --verbose --tb=short --capture=no -rfEsxX --cov=limits -m "not benchmark" -K filterwarnings = error module::ResourceWarning module::pytest.PytestUnraisableExceptionWarning ignore::DeprecationWarning:etcd3 ignore::DeprecationWarning:google.protobuf limits-3.12.0/requirements/000077500000000000000000000000001462017272200156245ustar00rootroot00000000000000limits-3.12.0/requirements/ci.txt000066400000000000000000000000131462017272200167520ustar00rootroot00000000000000-r dev.txt limits-3.12.0/requirements/dev.txt000066400000000000000000000001441462017272200171420ustar00rootroot00000000000000-r test.txt -r docs.txt ruff keyring mypy twine types-deprecated types-redis types-setuptools wheel limits-3.12.0/requirements/docs.txt000066400000000000000000000003221462017272200173120ustar00rootroot00000000000000-r main.txt furo==2024.5.6 Sphinx>=4,<8 sphinx-copybutton==0.5.2 sphinx-autobuild==2021.3.14 sphinxext-opengraph==0.9.1 sphinx-inline-tabs==2023.4.21 sphinx-paramlinks==0.6.0 sphinxcontrib-programoutput==0.17 limits-3.12.0/requirements/main.txt000066400000000000000000000001151462017272200173060ustar00rootroot00000000000000deprecated>=1.2 importlib_resources>=1.3 packaging>=21,<25 typing_extensions limits-3.12.0/requirements/storage/000077500000000000000000000000001462017272200172705ustar00rootroot00000000000000limits-3.12.0/requirements/storage/async-etcd.txt000066400000000000000000000000061462017272200220570ustar00rootroot00000000000000aetcd limits-3.12.0/requirements/storage/async-memcached.txt000066400000000000000000000001571462017272200230550ustar00rootroot00000000000000emcache>=0.6.1;python_version<"3.11" # not yet supported emcache>=1;python_version>="3.11" # not yet supported limits-3.12.0/requirements/storage/async-mongodb.txt000066400000000000000000000000141462017272200225640ustar00rootroot00000000000000motor>=3,<4 limits-3.12.0/requirements/storage/async-redis.txt000066400000000000000000000000221462017272200222440ustar00rootroot00000000000000coredis>=3.4.0,<5 
limits-3.12.0/requirements/storage/etcd.txt000066400000000000000000000000061462017272200207440ustar00rootroot00000000000000etcd3 limits-3.12.0/requirements/storage/memcached.txt000066400000000000000000000000241462017272200217330ustar00rootroot00000000000000pymemcache>3,<5.0.0 limits-3.12.0/requirements/storage/mongodb.txt000066400000000000000000000000171462017272200214540ustar00rootroot00000000000000pymongo>4.1,<5 limits-3.12.0/requirements/storage/redis.txt000066400000000000000000000000371462017272200211370ustar00rootroot00000000000000redis>3,!=4.5.2,!=4.5.3,<6.0.0 limits-3.12.0/requirements/storage/rediscluster.txt000066400000000000000000000000351462017272200225370ustar00rootroot00000000000000redis>=4.2.0,!=4.5.2,!=4.5.3 limits-3.12.0/requirements/test.txt000066400000000000000000000007401462017272200173450ustar00rootroot00000000000000-r main.txt -r storage/etcd.txt -r storage/redis.txt -r storage/rediscluster.txt -r storage/mongodb.txt -r storage/memcached.txt -r storage/async-etcd.txt -r storage/async-memcached.txt -r storage/async-mongodb.txt -r storage/async-redis.txt # Pin to < 4 for aetcd compatibility protobuf>3.6.1,<4 # Test related packages coverage hiro>0.1.6 flaky lovely-pytest-docker pytest<8 pytest-asyncio==0.21.2 pytest-benchmark[histogram] pytest-cov pytest-lazy-fixture pytest-mock PyYAML limits-3.12.0/scripts/000077500000000000000000000000001462017272200145705ustar00rootroot00000000000000limits-3.12.0/scripts/github_release_notes.sh000077500000000000000000000004521462017272200213220ustar00rootroot00000000000000#!/bin/bash TAG=$(echo $GITHUB_REF | cut -d / -f 3) git format-patch -1 $TAG --stdout | grep -P '^\+' | \ sed '1,4d' | \ grep -v "Release Date" | \ sed -E -e 's/^\+(.*)/\1/' -e 's/^\*(.*)/## \1/' -e 's/^ //' -e 's/\:(.*)\:(.*)/\2/' | \ sed -E -e 's/`(.*) <(https.*)>`_/[\1](\2)/' limits-3.12.0/setup.cfg000066400000000000000000000014161462017272200147240ustar00rootroot00000000000000[versioneer] VCS = git style = pep440-pre versionfile_source = limits/_version.py versionfile_build = limits/_version.py tag_prefix = parentdir_prefix = limits- [flake8] exclude = build/**,doc/**,_version.py,version.py,versioneer.py max_line_length=100 ignore = W503 [mypy] strict = True check_untyped_defs = True disallow_any_generics = True disallow_any_unimported = True disallow_any_decorated = True disallow_any_explicit = True disallow_incomplete_defs = True disallow_untyped_calls = True disallow_untyped_defs = True disallow_untyped_decorators = True show_error_codes = True warn_return_any = True warn_unused_ignores = True [mypy-limits.storage.etcd] ignore_errors = True [mypy-limits.aio.storage.etcd] ignore_errors = True [mypy-limits._version] ignore_errors = True limits-3.12.0/setup.py000077500000000000000000000036101462017272200146160ustar00rootroot00000000000000""" setup.py for limits """ __author__ = "Ali-Akber Saifee" __email__ = "ali@indydevs.org" __copyright__ = "Copyright 2023, Ali-Akber Saifee" import itertools import os from setuptools import find_packages, setup import versioneer THIS_DIR = os.path.abspath(os.path.dirname(__file__)) def get_requirements(req_file): requirements = [] for r in open(os.path.join(THIS_DIR, "requirements", req_file)).read().splitlines(): if r.strip(): requirements.append(r.strip()) return requirements EXTRA_REQUIREMENTS = { "redis": get_requirements("storage/redis.txt"), "rediscluster": get_requirements("storage/rediscluster.txt"), "memcached": get_requirements("storage/memcached.txt"), "mongodb": get_requirements("storage/mongodb.txt"), "etcd": 
get_requirements("storage/etcd.txt"), "async-redis": get_requirements("storage/async-redis.txt"), "async-memcached": get_requirements("storage/async-memcached.txt"), "async-mongodb": get_requirements("storage/async-mongodb.txt"), "async-etcd": get_requirements("storage/async-etcd.txt"), } EXTRA_REQUIREMENTS["all"] = list(itertools.chain(*EXTRA_REQUIREMENTS.values())) setup( name="limits", author=__author__, author_email=__email__, license="MIT", url="https://limits.readthedocs.org", project_urls={ "Source": "https://github.com/alisaifee/limits", }, zip_safe=False, version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), install_requires=get_requirements("main.txt"), classifiers=[k for k in open("CLASSIFIERS").read().split("\n") if k], description="Rate limiting utilities", long_description=open("README.rst").read(), packages=find_packages(exclude=["tests*"]), python_requires=">=3.8", extras_require=EXTRA_REQUIREMENTS, include_package_data=True, package_data={ "limits": ["py.typed"], }, ) limits-3.12.0/tag.sh000077500000000000000000000022301462017272200142100ustar00rootroot00000000000000#!/bin/bash last_tag=$(git tag | sort --version-sort -r | head -n 1) echo current version:$(python setup.py --version), current tag: $last_tag read -p "new version:" new_version last_portion=$(grep -P "^Changelog$" HISTORY.rst -5 | grep -P "^v\d+.\d+") changelog_file=/var/tmp/limiter.newchangelog new_changelog_heading="v${new_version}" new_changelog_heading_sep=$(python -c "print('-'*len('$new_changelog_heading'))") echo $new_changelog_heading > $changelog_file echo $new_changelog_heading_sep >> $changelog_file echo "Release Date: `date +"%Y-%m-%d"`" >> $changelog_file python -c "print(open('HISTORY.rst').read().replace('$last_portion', open('$changelog_file').read() +'\n' + '$last_portion'))" > HISTORY.rst.new cp HISTORY.rst.new HISTORY.rst vim -O HISTORY.rst <(echo \# vim:filetype=git;git log $last_tag..HEAD --format='* %s (%h)%n%b' | sed -E '/^\*/! s/(.*)/ \1/g') if rst2html.py HISTORY.rst > /dev/null then echo "Tag $new_version" git add HISTORY.rst git commit -m "Update changelog for ${new_version}" git tag -s ${new_version} -m "Tag version ${new_version}" rm HISTORY.rst.new else echo changelog has errors. skipping tag. 
fi; limits-3.12.0/tests/000077500000000000000000000000001462017272200142435ustar00rootroot00000000000000limits-3.12.0/tests/__init__.py000066400000000000000000000000001462017272200163420ustar00rootroot00000000000000limits-3.12.0/tests/aio/000077500000000000000000000000001462017272200150135ustar00rootroot00000000000000limits-3.12.0/tests/aio/__init__.py000066400000000000000000000000001462017272200171120ustar00rootroot00000000000000limits-3.12.0/tests/aio/test_storage.py000066400000000000000000000300121462017272200200640ustar00rootroot00000000000000import time import pytest from limits import RateLimitItemPerMinute, RateLimitItemPerSecond from limits.aio.storage import ( EtcdStorage, MemcachedStorage, MemoryStorage, MongoDBStorage, MovingWindowSupport, RedisClusterStorage, RedisSentinelStorage, RedisStorage, Storage, ) from limits.aio.strategies import MovingWindowRateLimiter from limits.errors import StorageError from limits.storage import storage_from_string from tests.utils import fixed_start @pytest.mark.asyncio class TestBaseStorage: async def test_pluggable_storage_no_moving_window(self): class MyStorage(Storage): STORAGE_SCHEME = ["async+mystorage"] @property def base_exceptions(self): return ValueError async def incr(self, key, expiry, elastic_expiry=False): return async def get(self, key): return 0 async def get_expiry(self, key): return time.time() async def reset(self): return async def check(self): return async def clear(self): return storage = storage_from_string("async+mystorage://") assert isinstance(storage, MyStorage) with pytest.raises(NotImplementedError): MovingWindowRateLimiter(storage) async def test_pluggable_storage_moving_window(self): class MyStorage(Storage): STORAGE_SCHEME = ["async+mystorage"] @property def base_exceptions(self): return ValueError async def incr(self, key, expiry, elastic_expiry=False): return async def get(self, key): return 0 async def get_expiry(self, key): return time.time() async def reset(self): return async def check(self): return async def clear(self): return async def acquire_entry(self, *a, **k): return True async def get_moving_window(self, *a, **k): return (time.time(), 1) storage = storage_from_string("async+mystorage://") assert isinstance(storage, MyStorage) MovingWindowRateLimiter(storage) @pytest.mark.asyncio @pytest.mark.parametrize( "uri, args, expected_instance, fixture", [ pytest.param("async+memory://", {}, MemoryStorage, None, id="in-memory"), pytest.param( "async+redis://localhost:7379", {}, RedisStorage, pytest.lazy_fixture("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "async+redis+unix:///tmp/limits.redis.sock", {}, RedisStorage, pytest.lazy_fixture("redis_uds"), marks=pytest.mark.redis, id="redis-uds", ), pytest.param( "async+redis+unix://:password/tmp/limits.redis.sock", {}, RedisStorage, pytest.lazy_fixture("redis_uds"), marks=pytest.mark.redis, id="redis-uds-auth", ), pytest.param( "async+memcached://localhost:22122", {}, MemcachedStorage, pytest.lazy_fixture("memcached"), marks=pytest.mark.memcached, id="memcached", ), pytest.param( "async+memcached://localhost:22122,localhost:22123", {}, MemcachedStorage, pytest.lazy_fixture("memcached_cluster"), marks=pytest.mark.memcached, id="memcached-cluster", ), pytest.param( "async+redis+sentinel://localhost:26379", {"service_name": "mymaster"}, RedisSentinelStorage, pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "async+redis+sentinel://localhost:26379/mymaster", {}, RedisSentinelStorage, 
pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-service-name-url", ), pytest.param( "async+redis+sentinel://:sekret@localhost:36379/mymaster", {"password": "sekret"}, RedisSentinelStorage, pytest.lazy_fixture("redis_sentinel_auth"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-auth", ), pytest.param( "async+redis+cluster://localhost:7001/", {}, RedisClusterStorage, pytest.lazy_fixture("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "async+redis+cluster://:sekret@localhost:8400/", {}, RedisClusterStorage, pytest.lazy_fixture("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "async+mongodb://localhost:37017/", {}, MongoDBStorage, pytest.lazy_fixture("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "async+etcd://localhost:2379", {}, EtcdStorage, pytest.lazy_fixture("etcd"), marks=pytest.mark.etcd, id="etcd", ), ], ) class TestConcreteStorages: async def test_storage_string(self, uri, args, expected_instance, fixture): assert isinstance(storage_from_string(uri, **args), expected_instance) @fixed_start async def test_expiry_incr(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) await storage.incr(limit.key_for(), limit.get_expiry()) time.sleep(1.1) assert await storage.get(limit.key_for()) == 0 @fixed_start async def test_expiry_acquire_entry(self, uri, args, expected_instance, fixture): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip("%s does not support acquire entry" % expected_instance) storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) assert await storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry() ) time.sleep(1.1) assert await storage.get(limit.key_for()) == 0 async def test_incr_custom_amount(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(1) assert 1 == await storage.incr(limit.key_for(), limit.get_expiry(), amount=1) assert 11 == await storage.incr(limit.key_for(), limit.get_expiry(), amount=10) async def test_acquire_entry_custom_amount( self, uri, args, expected_instance, fixture ): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip("%s does not support acquire entry" % expected_instance) storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(10) assert not await storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=11 ) assert await storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=1 ) assert not await storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=10 ) async def test_storage_check(self, uri, args, expected_instance, fixture): assert await storage_from_string(uri, **args).check() async def test_storage_reset(self, uri, args, expected_instance, fixture): if expected_instance == MemcachedStorage: pytest.skip("Reset not supported for memcached") limit1 = RateLimitItemPerMinute(10) # default namespace, LIMITER limit2 = RateLimitItemPerMinute(10, namespace="OTHER") storage = storage_from_string(uri, **args) for i in range(10): await storage.incr(limit1.key_for(str(i)), limit1.get_expiry()) await storage.incr(limit2.key_for(str(i)), limit2.get_expiry()) assert await storage.reset() == 20 async def test_storage_clear(self, uri, args, expected_instance, fixture): limit = 
RateLimitItemPerMinute(10) storage = storage_from_string(uri, **args) await storage.incr(limit.key_for(), limit.get_expiry()) assert 1 == await storage.get(limit.key_for()) await storage.clear(limit.key_for()) assert 0 == await storage.get(limit.key_for()) @pytest.mark.asyncio @pytest.mark.parametrize("wrap_exceptions", (True, False)) class TestStorageErrors: class MyStorage(Storage, MovingWindowSupport): STORAGE_SCHEME = ["mystorage"] class MyError(Exception): pass @property def base_exceptions(self): return self.MyError async def incr(self, key, expiry, elastic_expiry=False, amount=1): raise self.MyError() async def get(self, key): raise self.MyError() async def get_expiry(self, key): raise self.MyError() async def reset(self): raise self.MyError() async def check(self): raise self.MyError() async def clear(self, key): raise self.MyError() async def acquire_entry(self, key, limit, expiry, amount=1): raise self.MyError() async def get_moving_window(self, key, limit, expiry): raise self.MyError() def assert_exception(self, exc, wrap_exceptions): if wrap_exceptions: assert isinstance(exc, StorageError) assert isinstance(exc.storage_error, self.MyStorage.MyError) else: assert isinstance(exc, self.MyStorage.MyError) async def test_incr_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).incr("", 1) self.assert_exception(exc.value, wrap_exceptions) async def test_get_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).get("") self.assert_exception(exc.value, wrap_exceptions) async def test_get_expiry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).get_expiry("") self.assert_exception(exc.value, wrap_exceptions) async def test_reset_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).reset() self.assert_exception(exc.value, wrap_exceptions) async def test_check_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).check() self.assert_exception(exc.value, wrap_exceptions) async def test_clear_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).clear("") self.assert_exception(exc.value, wrap_exceptions) async def test_acquire_entry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).acquire_entry( "", 1, 1 ) self.assert_exception(exc.value, wrap_exceptions) async def test_get_moving_window_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: await self.MyStorage(wrap_exceptions=wrap_exceptions).get_moving_window( "", 1, 1 ) self.assert_exception(exc.value, wrap_exceptions) limits-3.12.0/tests/aio/test_strategy.py000066400000000000000000000204431462017272200202710ustar00rootroot00000000000000import time import pytest from limits.aio.storage import MemcachedStorage from limits.aio.strategies import ( FixedWindowElasticExpiryRateLimiter, FixedWindowRateLimiter, MovingWindowRateLimiter, ) from limits.limits import ( RateLimitItemPerHour, RateLimitItemPerMinute, RateLimitItemPerSecond, ) from limits.storage import storage_from_string from tests.utils import ( async_all_storage, async_moving_window_storage, async_window, fixed_start, ) @pytest.mark.asyncio class 
TestAsyncWindow: @async_all_storage @fixed_start async def test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) async with async_window(1) as (start, _): assert all([await limiter.hit(limit) for _ in range(0, 10)]) assert not await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 0 assert (await limiter.get_window_stats(limit)).reset_time == start + 2 @async_all_storage @fixed_start async def test_fixed_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert (await limiter.get_window_stats(limit)).remaining == 10 assert (await limiter.get_window_stats(limit)).reset_time == int(time.time()) @async_moving_window_storage async def test_moving_window_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerMinute(2) assert await limiter.hit(limit, "key") time.sleep(1) assert await limiter.hit(limit, "key") time.sleep(1) assert not await limiter.hit(limit, "key") assert (await limiter.get_window_stats(limit, "key")).remaining == 0 assert (await limiter.get_window_stats(limit, "key")).reset_time - int( time.time() ) == 58 @async_all_storage @fixed_start async def test_fixed_window_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(10, 2) assert not await limiter.hit(limit, "k1", cost=11) assert await limiter.hit(limit, "k2", cost=5) assert (await limiter.get_window_stats(limit, "k2")).remaining == 5 assert not await limiter.hit(limit, "k2", cost=6) @async_all_storage @fixed_start async def test_fixed_window_with_elastic_expiry(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) async with async_window(1) as (start, end): assert all([await limiter.hit(limit) for _ in range(0, 10)]) assert not await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 0 assert (await limiter.get_window_stats(limit)).reset_time == start + 2 async with async_window(3) as (start, end): assert not await limiter.hit(limit) assert await limiter.hit(limit) assert (await limiter.get_window_stats(limit)).remaining == 9 assert (await limiter.get_window_stats(limit)).reset_time == end + 2 @async_all_storage @fixed_start async def test_fixed_window_with_elastic_expiry_multiple_cost( self, uri, args, fixture ): storage = storage_from_string(uri, **args) limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not await limiter.hit(limit, "k1", cost=11) async with async_window(0) as (_, end): assert await limiter.hit(limit, "k2", cost=5) assert (await limiter.get_window_stats(limit, "k2")).remaining == 5 assert (await limiter.get_window_stats(limit, "k2")).reset_time == end + 2 assert not await limiter.hit(limit, "k2", cost=6) @async_moving_window_storage async def test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) # 5 hits in the first 100ms async with async_window(0.1): assert all([await limiter.hit(limit) for i in range(5)]) # 5 hits in the last 100ms async with async_window(2, 
delay=1.8): assert all([await limiter.hit(limit) for i in range(5)]) # 11th fails assert not await limiter.hit(limit) # 5 more succeed since there were only 5 in the last 2 seconds assert all([await limiter.hit(limit) for i in range(5)]) assert (await limiter.get_window_stats(limit)).remaining == 0 @async_moving_window_storage async def test_moving_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert (await limiter.get_window_stats(limit)).remaining == 10 assert (await limiter.get_window_stats(limit)).reset_time == int( time.time() + 2 ) @async_moving_window_storage async def test_moving_window_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not await limiter.hit(limit, "k1", cost=11) # 5 hits in the first 100ms async with async_window(0.1): assert await limiter.hit(limit, "k2", cost=5) # 5 hits in the last 100ms async with async_window(2, delay=1.8): assert all([await limiter.hit(limit, "k2") for i in range(5)]) # 11th fails assert not await limiter.hit(limit, "k2") assert all([await limiter.hit(limit, "k2") for i in range(5)]) assert (await limiter.get_window_stats(limit, "k2")).remaining == 0 assert not await limiter.hit(limit, "k2", cost=2) @async_moving_window_storage async def test_moving_window_varying_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) five_per_min = RateLimitItemPerMinute(5) await limiter.hit(five_per_min, cost=5) assert not await limiter.hit(five_per_min, cost=2) await limiter.clear(five_per_min) assert await limiter.hit(five_per_min) @async_moving_window_storage async def test_moving_window_huge_cost_async(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) many_per_min = RateLimitItemPerMinute(1_000_000) await limiter.hit(many_per_min, cost=999_999) assert not await limiter.hit(many_per_min, cost=2) await limiter.clear(many_per_min) assert await limiter.hit(many_per_min) @pytest.mark.memcached async def test_moving_window_memcached(self, memcached): storage = MemcachedStorage("memcached://localhost:22122") with pytest.raises(NotImplementedError): MovingWindowRateLimiter(storage) @async_all_storage @fixed_start @pytest.mark.flaky async def test_test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerHour(2, 1) assert await limiter.hit(limit) assert await limiter.test(limit) assert await limiter.hit(limit) assert not await limiter.test(limit) assert not await limiter.hit(limit) @async_moving_window_storage async def test_test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerHour(2, 1) limiter = MovingWindowRateLimiter(storage) assert await limiter.hit(limit) assert await limiter.test(limit) assert await limiter.hit(limit) assert not await limiter.test(limit) assert not await limiter.hit(limit) 
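

# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised by the tests above): the minimal async
# usage pattern these tests cover, assuming only the in-memory backend and a
# hypothetical "user:42" identifier.
# ---------------------------------------------------------------------------
import asyncio

from limits import RateLimitItemPerSecond
from limits.aio.strategies import MovingWindowRateLimiter
from limits.storage import storage_from_string


async def _example() -> None:
    limiter = MovingWindowRateLimiter(storage_from_string("async+memory://"))
    limit = RateLimitItemPerSecond(10)

    allowed = await limiter.hit(limit, "user:42")  # consume one unit
    stats = await limiter.get_window_stats(limit, "user:42")
    print(allowed, stats.remaining, stats.reset_time)


if __name__ == "__main__":
    asyncio.run(_example())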
limits-3.12.0/tests/benchmarks/000077500000000000000000000000001462017272200163605ustar00rootroot00000000000000limits-3.12.0/tests/benchmarks/__init__.py000066400000000000000000000000001462017272200204570ustar00rootroot00000000000000limits-3.12.0/tests/benchmarks/test_storage.py000066400000000000000000000036701462017272200214430ustar00rootroot00000000000000import functools import random import pytest import limits.aio.strategies from limits import RateLimitItemPerMinute from limits.storage import storage_from_string from limits.strategies import FixedWindowRateLimiter, MovingWindowRateLimiter from tests.utils import ( all_storage, async_all_storage, async_moving_window_storage, moving_window_storage, ) def hit_window(strategy, storage): limit = RateLimitItemPerMinute(500) uid = int(random.random() * 100) strategy(storage).hit(limit, uid) def hit_window_async(event_loop, strategy, storage): limit = RateLimitItemPerMinute(500) uid = int(random.random() * 100) event_loop.run_until_complete(strategy(storage).hit(limit, uid)) @all_storage @pytest.mark.benchmark(group="fixed-window") def test_fixed_window(benchmark, uri, args, fixture): benchmark( functools.partial( hit_window, FixedWindowRateLimiter, storage_from_string(uri, **args) ) ) @moving_window_storage @pytest.mark.benchmark(group="moving-window") def test_moving_window(benchmark, uri, args, fixture): benchmark( functools.partial( hit_window, MovingWindowRateLimiter, storage_from_string(uri, **args) ) ) @async_all_storage @pytest.mark.benchmark(group="async-fixed-window") def test_fixed_window_async(event_loop, benchmark, uri, args, fixture): benchmark( functools.partial( hit_window_async, event_loop, limits.aio.strategies.FixedWindowRateLimiter, storage_from_string(uri, **args), ) ) @async_moving_window_storage @pytest.mark.benchmark(group="async-moving-window") def test_moving_window_async(event_loop, benchmark, uri, args, fixture): benchmark( functools.partial( hit_window_async, event_loop, limits.aio.strategies.MovingWindowRateLimiter, storage_from_string(uri, **args), ) ) limits-3.12.0/tests/conftest.py000066400000000000000000000206631462017272200164510ustar00rootroot00000000000000import os import platform import socket import time import etcd3 import pymemcache import pymemcache.client import pymongo import pytest import redis import redis.sentinel def check_redis_cluster_ready(host, port): try: return redis.Redis(host, port).cluster("info")["cluster_state"] == "ok" except Exception: return False def check_redis_auth_cluster_ready(host, port): try: return ( redis.Redis(host, port, password="sekret").cluster("info")["cluster_state"] == "ok" ) except Exception: return False def check_redis_ssl_cluster_ready(host, port): storage_url = ( "rediss://localhost:8301/?ssl_cert_reqs=required" "&ssl_keyfile=./tests/tls/client.key" "&ssl_certfile=./tests/tls/client.crt" "&ssl_ca_certs=./tests/tls/ca.crt" ) try: return ( redis.Redis.from_url(storage_url).cluster("info")["cluster_state"] == "ok" ) except Exception: return False def check_sentinel_ready(host, port): try: return redis.sentinel.Sentinel([(host, port)]).master_for("mymaster").ping() except: # noqa return False def check_sentinel_auth_ready(host, port): try: return ( redis.sentinel.Sentinel( [(host, port)], sentinel_kwargs={"password": "sekret"}, password="sekret", ) .master_for("mymaster") .ping() ) except: # noqa return False def check_mongo_ready(host, port): try: pymongo.MongoClient("mongodb://localhost:37017").server_info() return True except: # noqa return False def 
check_etcd_ready(host, port): try: etcd3.client(host, port).status() return True except: # noqa return False @pytest.fixture(scope="session") def host_ip_env(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect(("10.255.255.255", 1)) ip = s.getsockname()[0] except Exception: ip = "127.0.0.1" finally: s.close() os.environ["HOST_IP"] = str(ip) @pytest.fixture(scope="session") def docker_services(host_ip_env, docker_services): return docker_services @pytest.fixture(scope="session") def etcd_client(docker_services): docker_services.start("etcd") docker_services.wait_for_service("etcd", 2379, check_etcd_ready) if os.environ.get("CI") == "True": time.sleep(5) return etcd3.client() @pytest.fixture(scope="session") def redis_basic_client(docker_services): docker_services.start("redis-basic") return redis.StrictRedis("localhost", 7379) @pytest.fixture(scope="session") def redis_uds_client(docker_services): if platform.system().lower() == "darwin": pytest.skip("Fixture not supported on OSX") docker_services.start("redis-uds") return redis.from_url("unix:///tmp/limits.redis.sock") @pytest.fixture(scope="session") def redis_auth_client(docker_services): docker_services.start("redis-auth") return redis.from_url("redis://:sekret@localhost:7389") @pytest.fixture(scope="session") def redis_ssl_client(docker_services): docker_services.start("redis-ssl") storage_url = ( "rediss://localhost:8379/0?ssl_cert_reqs=required" "&ssl_keyfile=./tests/tls/client.key" "&ssl_certfile=./tests/tls/client.crt" "&ssl_ca_certs=./tests/tls/ca.crt" ) return redis.from_url(storage_url) @pytest.fixture(scope="session") def redis_cluster_client(docker_services): docker_services.start("redis-cluster-init") docker_services.wait_for_service("redis-cluster-6", 7006, check_redis_cluster_ready) if os.environ.get("CI") == "True": time.sleep(10) return redis.cluster.RedisCluster("localhost", 7001) @pytest.fixture(scope="session") def redis_auth_cluster_client(docker_services): docker_services.start("redis-cluster-auth-init") docker_services.wait_for_service( "redis-cluster-auth-3", 8402, check_redis_auth_cluster_ready ) if os.environ.get("CI") == "True": time.sleep(10) return redis.cluster.RedisCluster("localhost", 8400, password="sekret") @pytest.fixture(scope="session") def redis_ssl_cluster_client(docker_services): docker_services.start("redis-ssl-cluster-init") docker_services.wait_for_service( "redis-ssl-cluster-6", 8306, check_redis_ssl_cluster_ready ) if os.environ.get("CI") == "True": time.sleep(10) storage_url = ( "rediss://localhost:8301/?ssl_cert_reqs=required" "&ssl_keyfile=./tests/tls/client.key" "&ssl_certfile=./tests/tls/client.crt" "&ssl_ca_certs=./tests/tls/ca.crt" ) return redis.cluster.RedisCluster.from_url(storage_url) @pytest.fixture(scope="session") def redis_sentinel_client(docker_services): docker_services.start("redis-sentinel") docker_services.wait_for_service("redis-sentinel", 26379, check_sentinel_ready) return redis.sentinel.Sentinel([("localhost", 26379)]) @pytest.fixture(scope="session") def redis_sentinel_auth_client(docker_services): docker_services.start("redis-sentinel-auth") docker_services.wait_for_service( "redis-sentinel-auth", 26379, check_sentinel_auth_ready ) return redis.sentinel.Sentinel( [("localhost", 36379)], sentinel_kwargs={"password": "sekret"}, password="sekret", ) @pytest.fixture(scope="session") def memcached_client(docker_services): docker_services.start("memcached-1") return pymemcache.Client(("localhost", 22122)) @pytest.fixture(scope="session") def 
memcached_cluster_client(docker_services): docker_services.start("memcached-1") docker_services.start("memcached-2") return pymemcache.client.HashClient([("localhost", 22122), ("localhost", 22123)]) @pytest.fixture(scope="session") def memcached_uds_client(docker_services): if platform.system().lower() == "darwin": pytest.skip("Fixture not supported on OSX") docker_services.start("memcached-uds") return pymemcache.Client("/tmp/limits.memcached.sock") @pytest.fixture(scope="session") def mongodb_client(docker_services): docker_services.start("mongodb") docker_services.wait_for_service("mongodb", 27017, check_mongo_ready) return pymongo.MongoClient("mongodb://localhost:37017") @pytest.fixture def memcached(memcached_client): memcached_client.flush_all() return memcached_client @pytest.fixture def memcached_uds(memcached_uds_client): memcached_uds_client.flush_all() return memcached_uds_client @pytest.fixture def memcached_cluster(memcached_cluster_client): memcached_cluster_client.flush_all() return memcached_cluster_client @pytest.fixture def redis_basic(redis_basic_client): redis_basic_client.flushall() return redis_basic_client @pytest.fixture def redis_ssl(redis_ssl_client): redis_ssl_client.flushall() return redis_ssl_client @pytest.fixture def redis_auth(redis_auth_client): redis_auth_client.flushall() return redis_auth_client @pytest.fixture def redis_uds(redis_uds_client): redis_uds_client.flushall() return redis_uds_client @pytest.fixture def redis_cluster(redis_cluster_client): redis_cluster_client.flushall() return redis_cluster_client @pytest.fixture def redis_auth_cluster(redis_auth_cluster_client): redis_auth_cluster_client.flushall() return redis_auth_cluster_client @pytest.fixture def redis_ssl_cluster(redis_ssl_cluster_client): redis_ssl_cluster_client.flushall() return redis_ssl_cluster_client @pytest.fixture def redis_sentinel(redis_sentinel_client): redis_sentinel_client.master_for("mymaster").flushall() return redis_sentinel_client @pytest.fixture def redis_sentinel_auth(redis_sentinel_auth_client): redis_sentinel_auth_client.master_for("mymaster").flushall() return redis_sentinel_auth_client @pytest.fixture def mongodb(mongodb_client): mongodb_client.limits.windows.drop() mongodb_client.limits.counters.drop() return mongodb_client @pytest.fixture def etcd(etcd_client): etcd_client.delete_prefix("limits/") return etcd_client @pytest.fixture(scope="session") def docker_services_project_name(): return "limits" @pytest.fixture(scope="session") def docker_compose_files(pytestconfig): """Get the docker-compose.yml absolute path. Override this fixture in your tests if you need a custom location. 
""" return ["docker-compose.yml"] limits-3.12.0/tests/integration/000077500000000000000000000000001462017272200165665ustar00rootroot00000000000000limits-3.12.0/tests/integration/test_concurrency.py000066400000000000000000000057531462017272200225430ustar00rootroot00000000000000import asyncio import random import threading import time from uuid import uuid4 import pytest import limits.aio.strategies import limits.strategies from limits.limits import RateLimitItemPerMinute from limits.storage import storage_from_string from tests.utils import ( all_storage, async_all_storage, async_moving_window_storage, moving_window_storage, ) @pytest.mark.integration class TestConcurrency: @all_storage def test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.strategies.FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(5) [limiter.hit(limit, uuid4().hex) for _ in range(50)] key = uuid4().hex hits = [] def hit(): time.sleep(random.random()) if limiter.hit(limit, key): hits.append(None) threads = [threading.Thread(target=hit) for _ in range(50)] [t.start() for t in threads] [t.join() for t in threads] assert len(hits) == 5 @moving_window_storage def test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.strategies.MovingWindowRateLimiter(storage) limit = RateLimitItemPerMinute(5) [limiter.hit(limit, uuid4().hex) for _ in range(50)] key = uuid4().hex hits = [] def hit(): time.sleep(random.random()) if limiter.hit(limit, key): hits.append(None) threads = [threading.Thread(target=hit) for _ in range(50)] [t.start() for t in threads] [t.join() for t in threads] assert len(hits) == 5 @pytest.mark.asyncio @pytest.mark.integration class TestAsyncConcurrency: @async_all_storage async def test_fixed_window(self, event_loop, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.aio.strategies.FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(5) [await limiter.hit(limit, uuid4().hex) for _ in range(50)] key = uuid4().hex hits = [] async def hit(): await asyncio.sleep(random.random()) if await limiter.hit(limit, key): hits.append(None) await asyncio.gather(*[hit() for _ in range(50)]) assert len(hits) == 5 @async_moving_window_storage async def test_moving_window(self, event_loop, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = limits.aio.strategies.MovingWindowRateLimiter(storage) limit = RateLimitItemPerMinute(5) [await limiter.hit(limit, uuid4().hex) for _ in range(50)] key = uuid4().hex hits = [] async def hit(): await asyncio.sleep(random.random()) if await limiter.hit(limit, key): hits.append(None) await asyncio.gather(*[hit() for _ in range(50)]) assert len(hits) == 5 limits-3.12.0/tests/test_limit_granularities.py000066400000000000000000000026131462017272200217250ustar00rootroot00000000000000from limits import limits class TestGranularity: def test_seconds_value(self): assert limits.RateLimitItemPerSecond(1).get_expiry() == 1 assert limits.RateLimitItemPerMinute(1).get_expiry() == 60 assert limits.RateLimitItemPerHour(1).get_expiry() == 60 * 60 assert limits.RateLimitItemPerDay(1).get_expiry() == 60 * 60 * 24 assert limits.RateLimitItemPerMonth(1).get_expiry() == 60 * 60 * 24 * 30 assert limits.RateLimitItemPerYear(1).get_expiry() == 60 * 60 * 24 * 30 * 12 def test_representation(self): assert "1 per 1 second" in str(limits.RateLimitItemPerSecond(1)) assert "1 per 1 minute" in str(limits.RateLimitItemPerMinute(1)) assert "1 per 1 
hour" in str(limits.RateLimitItemPerHour(1)) assert "1 per 1 day" in str(limits.RateLimitItemPerDay(1)) assert "1 per 1 month" in str(limits.RateLimitItemPerMonth(1)) assert "1 per 1 year" in str(limits.RateLimitItemPerYear(1)) def test_comparison(self): assert limits.RateLimitItemPerSecond(1) < limits.RateLimitItemPerMinute(1) assert limits.RateLimitItemPerMinute(1) < limits.RateLimitItemPerHour(1) assert limits.RateLimitItemPerHour(1) < limits.RateLimitItemPerDay(1) assert limits.RateLimitItemPerDay(1) < limits.RateLimitItemPerMonth(1) assert limits.RateLimitItemPerMonth(1) < limits.RateLimitItemPerYear(1) limits-3.12.0/tests/test_limits.py000066400000000000000000000027321462017272200171610ustar00rootroot00000000000000from collections import defaultdict from limits import limits class TestLimits: class FakeLimit(limits.RateLimitItem): GRANULARITY = limits.Granularity(1, "fake") class OtherFakeLimit(limits.RateLimitItem): GRANULARITY = limits.Granularity(1, "otherfake") def test_key_all_strings_default_namespace(self): item = self.FakeLimit(1, 1) assert item.key_for("a", "b", "c") == "LIMITER/a/b/c/1/1/fake" def test_key_with_none_default_namespace(self): item = self.FakeLimit(1, 1) assert item.key_for("a", None, None) == "LIMITER/a/None/None/1/1/fake" def test_key_with_int_default_namespace(self): item = self.FakeLimit(1, 1) assert item.key_for("a", 1) == "LIMITER/a/1/1/1/fake" def test_key_with_mixed_string_types_default_namespace(self): item = self.FakeLimit(1, 1) assert item.key_for(b"a", "b") == "LIMITER/a/b/1/1/fake" def test_equality(self): item = self.FakeLimit(1, 1) assert item == self.FakeLimit(1, 1) assert item != self.FakeLimit(1, 2) assert item != self.FakeLimit(2, 1) assert item != "someething else" def test_hashabilty(self): mapping = defaultdict(lambda: 1) mapping[self.FakeLimit(1, 1)] += 1 mapping[self.FakeLimit(1, 1)] += 1 mapping[self.FakeLimit(1, 2)] += 1 mapping[self.FakeLimit(1, 2)] += 1 mapping[self.OtherFakeLimit(1, 2)] += 1 assert len(mapping) == 3 limits-3.12.0/tests/test_ratelimit_parser.py000066400000000000000000000043111462017272200212210ustar00rootroot00000000000000import pytest from limits import limits from limits.util import granularity_from_string, parse, parse_many class TestRatelimitParser: def test_singles(self): for rl_string in ["1 per second", "1/SECOND", "1 / Second"]: assert parse(rl_string) == limits.RateLimitItemPerSecond(1) for rl_string in ["1 per minute", "1/MINUTE", "1/Minute"]: assert parse(rl_string) == limits.RateLimitItemPerMinute(1) for rl_string in ["1 per hour", "1/HOUR", "1/Hour"]: assert parse(rl_string) == limits.RateLimitItemPerHour(1) for rl_string in ["1 per day", "1/DAY", "1 / Day"]: assert parse(rl_string) == limits.RateLimitItemPerDay(1) for rl_string in ["1 per month", "1/MONTH", "1 / Month"]: assert parse(rl_string) == limits.RateLimitItemPerMonth(1) for rl_string in ["1 per year", "1/Year", "1 / year"]: assert parse(rl_string) == limits.RateLimitItemPerYear(1) def test_multiples(self): assert parse("1 per 3 hour").get_expiry() == 3 * 60 * 60 assert parse("1 per 2 hours").get_expiry() == 2 * 60 * 60 assert parse("1/2 day").get_expiry() == 2 * 24 * 60 * 60 def test_parse_many(self): parsed = parse_many("1 per 3 hour; 1 per second") assert len(parsed) == 2 assert parsed[0].get_expiry() == 3 * 60 * 60 assert parsed[1].get_expiry() == 1 def test_parse_many_csv(self): parsed = parse_many("1 per 3 hour, 1 per second") assert len(parsed) == 2 assert parsed[0].get_expiry() == 3 * 60 * 60 assert parsed[1].get_expiry() == 1 
@pytest.mark.parametrize("value", [None, "1 per millenium", "meow"]) def test_invalid_string_parse(self, value): with pytest.raises(ValueError): parse(value) @pytest.mark.parametrize("value", ["millenium", "meow"]) def test_invalid_string_granularity(self, value): with pytest.raises(ValueError): granularity_from_string(value) @pytest.mark.parametrize( "value", ["1 per yearl; 2 per decade"], ) def test_invalid_string_parse_many(self, value): with pytest.raises(ValueError): parse_many(value) limits-3.12.0/tests/test_storage.py000066400000000000000000000275761462017272200173410ustar00rootroot00000000000000import time import pytest from limits import RateLimitItemPerMinute, RateLimitItemPerSecond from limits.errors import ConfigurationError, StorageError from limits.storage import ( EtcdStorage, MemcachedStorage, MemoryStorage, MongoDBStorage, MovingWindowSupport, RedisClusterStorage, RedisSentinelStorage, RedisStorage, Storage, storage_from_string, ) from limits.strategies import MovingWindowRateLimiter from tests.utils import fixed_start class TestBaseStorage: @pytest.mark.parametrize( "uri, args", [("blah://", {}), ("redis+sentinel://localhost:26379", {})] ) def test_invalid_storage_string(self, uri, args): with pytest.raises(ConfigurationError): storage_from_string(uri, **args) def test_pluggable_storage_no_moving_window(self): class MyStorage(Storage): STORAGE_SCHEME = ["mystorage"] @property def base_exceptions(self): return ValueError def incr(self, key, expiry, elastic_expiry=False): return def get(self, key): return 0 def get_expiry(self, key): return time.time() def reset(self): return def check(self): return def clear(self): return storage = storage_from_string("mystorage://") assert isinstance(storage, MyStorage) with pytest.raises(NotImplementedError): MovingWindowRateLimiter(storage) def test_pluggable_storage_moving_window(self): class MyStorage(Storage): STORAGE_SCHEME = ["mystorage"] @property def base_exceptions(self): return ValueError def incr(self, key, expiry, elastic_expiry=False): return def get(self, key): return 0 def get_expiry(self, key): return time.time() def reset(self): return def check(self): return def clear(self): return def acquire_entry(self, *a, **k): return True def get_moving_window(self, *a, **k): return (time.time(), 1) storage = storage_from_string("mystorage://") assert isinstance(storage, MyStorage) MovingWindowRateLimiter(storage) @pytest.mark.parametrize( "uri, args, expected_instance, fixture", [ pytest.param("memory://", {}, MemoryStorage, None, id="in-memory"), pytest.param( "redis://localhost:7379", {}, RedisStorage, pytest.lazy_fixture("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "redis+unix:///tmp/limits.redis.sock", {}, RedisStorage, pytest.lazy_fixture("redis_uds"), marks=pytest.mark.redis, id="redis-uds", ), pytest.param( "redis+unix://:password/tmp/limits.redis.sock", {}, RedisStorage, pytest.lazy_fixture("redis_uds"), marks=pytest.mark.redis, id="redis-uds-auth", ), pytest.param( "memcached://localhost:22122", {}, MemcachedStorage, pytest.lazy_fixture("memcached"), marks=pytest.mark.memcached, id="memcached", ), pytest.param( "memcached://localhost:22122,localhost:22123", {}, MemcachedStorage, pytest.lazy_fixture("memcached_cluster"), marks=pytest.mark.memcached, id="memcached-cluster", ), pytest.param( "memcached:///tmp/limits.memcached.sock", {}, MemcachedStorage, pytest.lazy_fixture("memcached_uds"), marks=pytest.mark.memcached, id="memcached-uds", ), pytest.param( "redis+sentinel://localhost:26379", 
{"service_name": "mymaster"}, RedisSentinelStorage, pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "redis+sentinel://localhost:26379/mymaster", {}, RedisSentinelStorage, pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-service-name-url", ), pytest.param( "redis+sentinel://:sekret@localhost:36379/mymaster", {"password": "sekret"}, RedisSentinelStorage, pytest.lazy_fixture("redis_sentinel_auth"), marks=pytest.mark.redis_sentinel, id="redis-sentinel-auth", ), pytest.param( "redis+cluster://localhost:7001/", {}, RedisClusterStorage, pytest.lazy_fixture("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "redis+cluster://:sekret@localhost:8400/", {}, RedisClusterStorage, pytest.lazy_fixture("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "mongodb://localhost:37017/", {}, MongoDBStorage, pytest.lazy_fixture("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "etcd://localhost:2379", {}, EtcdStorage, pytest.lazy_fixture("etcd"), marks=pytest.mark.etcd, id="etcd", ), ], ) class TestConcreteStorages: def test_storage_string(self, uri, args, expected_instance, fixture): assert isinstance(storage_from_string(uri, **args), expected_instance) @fixed_start def test_expiry_incr(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) storage.incr(limit.key_for(), limit.get_expiry()) time.sleep(1.1) assert storage.get(limit.key_for()) == 0 @fixed_start def test_expiry_acquire_entry(self, uri, args, expected_instance, fixture): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip("%s does not support acquire entry" % expected_instance) storage = storage_from_string(uri, **args) limit = RateLimitItemPerSecond(1) assert storage.acquire_entry(limit.key_for(), limit.amount, limit.get_expiry()) time.sleep(1.1) assert storage.get(limit.key_for()) == 0 def test_incr_custom_amount(self, uri, args, expected_instance, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(1) assert 1 == storage.incr(limit.key_for(), limit.get_expiry(), amount=1) assert 11 == storage.incr(limit.key_for(), limit.get_expiry(), amount=10) def test_acquire_entry_custom_amount(self, uri, args, expected_instance, fixture): if not issubclass(expected_instance, MovingWindowSupport): pytest.skip("%s does not support acquire entry" % expected_instance) storage = storage_from_string(uri, **args) limit = RateLimitItemPerMinute(10) assert not storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=11 ) assert storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=1 ) assert not storage.acquire_entry( limit.key_for(), limit.amount, limit.get_expiry(), amount=10 ) def test_storage_check(self, uri, args, expected_instance, fixture): assert storage_from_string(uri, **args).check() def test_storage_reset(self, uri, args, expected_instance, fixture): if expected_instance == MemcachedStorage: pytest.skip("Reset not supported for memcached") limit1 = RateLimitItemPerMinute(10) # default namespace, LIMITER limit2 = RateLimitItemPerMinute(10, namespace="OTHER") storage = storage_from_string(uri, **args) for i in range(10): storage.incr(limit1.key_for(str(i)), limit1.get_expiry()) storage.incr(limit2.key_for(str(i)), limit2.get_expiry()) assert storage.reset() == 20 def 
test_storage_clear(self, uri, args, expected_instance, fixture): limit = RateLimitItemPerMinute(10) storage = storage_from_string(uri, **args) storage.incr(limit.key_for(), limit.get_expiry()) assert 1 == storage.get(limit.key_for()) storage.clear(limit.key_for()) assert 0 == storage.get(limit.key_for()) @pytest.mark.parametrize("wrap_exceptions", (True, False)) class TestStorageErrors: class MyStorage(Storage, MovingWindowSupport): STORAGE_SCHEME = ["mystorage"] class MyError(Exception): pass @property def base_exceptions(self): return self.MyError def incr(self, key, expiry, elastic_expiry=False, amount=1): raise self.MyError() def get(self, key): raise self.MyError() def get_expiry(self, key): raise self.MyError() def reset(self): raise self.MyError() def check(self): raise self.MyError() def clear(self, key): raise self.MyError() def acquire_entry(self, key, limit, expiry, amount=1): raise self.MyError() def get_moving_window(self, key, limit, expiry): raise self.MyError() def assert_exception(self, exc, wrap_exceptions): if wrap_exceptions: assert isinstance(exc, StorageError) assert isinstance(exc.storage_error, self.MyStorage.MyError) else: assert isinstance(exc, self.MyStorage.MyError) def test_incr_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).incr("", 1) self.assert_exception(exc.value, wrap_exceptions) def test_get_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).get("") self.assert_exception(exc.value, wrap_exceptions) def test_get_expiry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).get_expiry("") self.assert_exception(exc.value, wrap_exceptions) def test_reset_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).reset() self.assert_exception(exc.value, wrap_exceptions) def test_check_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).check() self.assert_exception(exc.value, wrap_exceptions) def test_clear_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).clear("") self.assert_exception(exc.value, wrap_exceptions) def test_acquire_entry_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).acquire_entry("", 1, 1) self.assert_exception(exc.value, wrap_exceptions) def test_get_moving_window_exception(self, wrap_exceptions): with pytest.raises(Exception) as exc: self.MyStorage(wrap_exceptions=wrap_exceptions).get_moving_window("", 1, 1) self.assert_exception(exc.value, wrap_exceptions) limits-3.12.0/tests/test_strategy.py000066400000000000000000000166451462017272200175320ustar00rootroot00000000000000import math import time import pytest from limits.limits import ( RateLimitItemPerHour, RateLimitItemPerMinute, RateLimitItemPerSecond, ) from limits.storage import MemcachedStorage, storage_from_string from limits.strategies import ( FixedWindowElasticExpiryRateLimiter, FixedWindowRateLimiter, MovingWindowRateLimiter, ) from tests.utils import all_storage, fixed_start, moving_window_storage, window class TestWindow: @all_storage @fixed_start def test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = 
RateLimitItemPerSecond(10, 2) with window(1) as (start, end): assert all([limiter.hit(limit) for _ in range(0, 10)]) assert not limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 0 assert limiter.get_window_stats(limit).reset_time == math.floor(start + 2) @all_storage @fixed_start def test_fixed_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert limiter.get_window_stats(limit).remaining == 10 assert limiter.get_window_stats(limit).reset_time == int(time.time()) @all_storage @fixed_start def test_fixed_window_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerMinute(10, 2) assert not limiter.hit(limit, "k1", cost=11) assert limiter.hit(limit, "k2", cost=5) assert limiter.get_window_stats(limit, "k2").remaining == 5 assert not limiter.hit(limit, "k2", cost=6) @all_storage @fixed_start def test_fixed_window_with_elastic_expiry(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) with window(1) as (start, end): assert all([limiter.hit(limit) for _ in range(0, 10)]) assert not limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 0 assert limiter.get_window_stats(limit).reset_time == start + 2 with window(3) as (start, end): assert not limiter.hit(limit) assert limiter.hit(limit) assert limiter.get_window_stats(limit).remaining == 9 assert limiter.get_window_stats(limit).reset_time == end + 2 @all_storage @fixed_start def test_fixed_window_with_elastic_expiry_multiple_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowElasticExpiryRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not limiter.hit(limit, "k1", cost=11) with window(0) as (start, end): assert limiter.hit(limit, "k2", cost=5) assert limiter.get_window_stats(limit, "k2").remaining == 5 assert limiter.get_window_stats(limit, "k2").reset_time == end + 2 assert not limiter.hit(limit, "k2", cost=6) @moving_window_storage def test_moving_window_empty_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert limiter.get_window_stats(limit).remaining == 10 assert limiter.get_window_stats(limit).reset_time == int(time.time() + 2) @moving_window_storage def test_moving_window_stats(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerMinute(2) assert limiter.hit(limit, "key") time.sleep(1) assert limiter.hit(limit, "key") time.sleep(1) assert not limiter.hit(limit, "key") assert limiter.get_window_stats(limit, "key").remaining == 0 assert ( limiter.get_window_stats(limit, "key").reset_time - int(time.time()) == 58 ) @moving_window_storage def test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) # 5 hits in the first 100ms with window(0.1): assert all(limiter.hit(limit) for i in range(5)) # 5 hits in the last 100ms with window(2, delay=1.8): assert all(limiter.hit(limit) for i in range(5)) # 11th fails assert not limiter.hit(limit) @moving_window_storage def test_moving_window_multiple_cost(self, uri, args, fixture): 
storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) limit = RateLimitItemPerSecond(10, 2) assert not limiter.hit(limit, "k1", cost=11) # 5 hits in the first 100ms with window(0.1): limiter.hit(limit, "k2", cost=5) # 5 hits in the last 100ms with window(2, delay=1.8): assert all(limiter.hit(limit, "k2") for i in range(5)) # 11th fails assert not limiter.hit(limit, "k2") # 5 more succeed since there were only 5 in the last 2 seconds assert all([limiter.hit(limit, "k2") for i in range(5)]) assert limiter.get_window_stats(limit, "k2")[1] == 0 assert not limiter.hit(limit, "k2", cost=2) @moving_window_storage def test_moving_window_varying_cost(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) five_per_min = RateLimitItemPerMinute(5) limiter.hit(five_per_min, cost=5) assert not limiter.hit(five_per_min, cost=2) limiter.clear(five_per_min) assert limiter.hit(five_per_min) @moving_window_storage def test_moving_window_huge_cost_sync(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = MovingWindowRateLimiter(storage) many_per_min = RateLimitItemPerMinute(1_000_000) limiter.hit(many_per_min, cost=1_000_000) assert not limiter.hit(many_per_min, cost=2) limiter.clear(many_per_min) assert limiter.hit(many_per_min) @pytest.mark.memcached def test_moving_window_memcached(self, memcached): storage = MemcachedStorage("memcached://localhost:22122") with pytest.raises(NotImplementedError): MovingWindowRateLimiter(storage) @all_storage @fixed_start @pytest.mark.flaky def test_test_fixed_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limiter = FixedWindowRateLimiter(storage) limit = RateLimitItemPerHour(2, 1) assert limiter.hit(limit) assert limiter.test(limit) assert limiter.hit(limit) assert not limiter.test(limit) assert not limiter.hit(limit) @moving_window_storage def test_test_moving_window(self, uri, args, fixture): storage = storage_from_string(uri, **args) limit = RateLimitItemPerHour(2, 1) limiter = MovingWindowRateLimiter(storage) assert limiter.hit(limit) assert limiter.test(limit) assert limiter.hit(limit) assert not limiter.test(limit) assert not limiter.hit(limit) limits-3.12.0/tests/test_utils.py000066400000000000000000000015571462017272200170240ustar00rootroot00000000000000import pytest from packaging.version import Version from limits.errors import ConfigurationError from limits.util import LazyDependency def test_lazy_dependency_found(): class Demo(LazyDependency): DEPENDENCIES = ["redis"] d = Demo() assert d.dependencies["redis"].version_found def test_lazy_dependency_version_low(): class Demo(LazyDependency): DEPENDENCIES = { "redis": Version("999.999"), "maythisneverexist": Version("1.0"), } d = Demo() with pytest.raises( ConfigurationError, match="minimum version of 999.999 of redis could not be found", ): assert d.dependencies["redis"].version_found with pytest.raises( ConfigurationError, match="maythisneverexist prerequisite not available" ): assert d.dependencies["maythisneverexist"].version_found limits-3.12.0/tests/tls/000077500000000000000000000000001462017272200150455ustar00rootroot00000000000000limits-3.12.0/tests/tls/ca.crt000066400000000000000000000033411462017272200161430ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE5jCCAs4CCQCQsEnzo4zO8zANBgkqhkiG9w0BAQsFADA1MRMwEQYDVQQKDApS ZWRpcyBUZXN0MR4wHAYDVQQDDBVDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjIx MjIxMjEyMjEwWhcNMzIxMjE4MjEyMjEwWjA1MRMwEQYDVQQKDApSZWRpcyBUZXN0 
MR4wHAYDVQQDDBVDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEB AQUAA4ICDwAwggIKAoICAQDcPCnfb1IJd3VNXhDXKWJ3PcjWFD15J3n7Qak4EtYu s4WK5m5qFCr2vHLaJaXsgxgZnfT00bLzUlQHaQQjGA5O1Ag428RhUC5qoJZOHplg bCFdmh99s5auuvX2cTOK/mqP+Jahutsn0tYezyzvNJKltU5wSLI/CFtCXsgR9YX+ WAmsbw3dRckWFH6rHBGlvm1o0uXQMdsdcUccSFGnlif379pw9JvCseQBOlCr9g/0 OB2cpZbl2xn32TKQBHXc1ZaBZnuqoZsafOIQ5Aka+kff9R7rgl4eiXgPz1aP+/Gr AxJgli7H8Bf4yiux4xVXzQlWGtoW8PIJJLFN+Diky5yQBuk9nUhRFk7vG8HoQt6+ xp8qJ4GwQBcDrghlqvsUwB0CHxHYSWKPrmF+k9KhDXWZNN0KUfaGt+zOCO+Bs58v OZh4kbSFh30VkG3QmivCSpU4a7VxTrits37M6E2zJXgOocsLQeBTbTElk4J2batw E5Xo1nDUwCQswolOkFzjYrhpHuRBXgN65joP5SOZH5R3/StAwHP6xSgaYfgb8yQh FOoYRukqmjSIZTdYdwFgSMmYdX193cQYKcItVpSKDqPLcMwKhLnzgeEj0KtBsAGI PVZml8oXLNCNneSovKk2qfMA6GwNK74r7Rq/eKYKZ8IyssLvjkoOj8gor6RJnitz 0wIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQDL23I564CMb6CKp63C1JB+AaYBFW7c M0xEe3Um07OV3VCgUenlzM5qToaUSwfYRz/iKTedgkdVMROqXVMcpAiMju/fFuOG OgBJLxtO4SfXo1/359Rp2H82Gty1yCUop4TOLMFYsBC6+lJgfh9/XCW+21FdcobM ITceaVUqwFXHt2YlJ0ZXHMvCduxR/6xP7pj4jrpmywdzSPX5sj45AeicnSOitgPN iznCsWnVktc7EcWO2gwC0h9MLvVEn8d634Yh2NbggZ3Xk9JMQgaToj47y41TD4Sh ddATGOITNf2uzj+w4qCl7BFBMXfc77O8bKoHIEeoDCW3Hp4ACI1NV+ryyRlh+bhl ddCNAuVQmGRmz6elzV7DOv8/Qs//yQXukEpT+VAZyW22yYA3B4DI/mzSuqhzc9s/ R8Gvekn3pX7NtbIC/xV2FoG2hPbehHVVol1Ocz3wb7VOaqe55E9ocBcZNp/MOaV+ xCOALPszbC8E3D7dkcuEU8XDTDQCp6jgKgyOft9tXU1L3odVqf4DNWrh/q49ggUN K2fsK7Oh1DyYCTSm+vRdah/leH5FVS7XKR3It2/exQ8RN+Jdeu/jmUDtOLb5Slse XlRvstejGKx/3BdsW0z0kAA5fyr/u3hKeXwCW22gDs9U1fSkN/9Bh5aqZlxGgv5e 39wnC72tq8pxEQ== -----END CERTIFICATE----- limits-3.12.0/tests/tls/ca.key000066400000000000000000000062531462017272200161500ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKQIBAAKCAgEA3Dwp329SCXd1TV4Q1ylidz3I1hQ9eSd5+0GpOBLWLrOFiuZu ahQq9rxy2iWl7IMYGZ309NGy81JUB2kEIxgOTtQIONvEYVAuaqCWTh6ZYGwhXZof fbOWrrr19nEziv5qj/iWobrbJ9LWHs8s7zSSpbVOcEiyPwhbQl7IEfWF/lgJrG8N 3UXJFhR+qxwRpb5taNLl0DHbHXFHHEhRp5Yn9+/acPSbwrHkATpQq/YP9DgdnKWW 5dsZ99kykAR13NWWgWZ7qqGbGnziEOQJGvpH3/Ue64JeHol4D89Wj/vxqwMSYJYu x/AX+MorseMVV80JVhraFvDyCSSxTfg4pMuckAbpPZ1IURZO7xvB6ELevsafKieB sEAXA64IZar7FMAdAh8R2Elij65hfpPSoQ11mTTdClH2hrfszgjvgbOfLzmYeJG0 hYd9FZBt0JorwkqVOGu1cU64rbN+zOhNsyV4DqHLC0HgU20xJZOCdm2rcBOV6NZw 1MAkLMKJTpBc42K4aR7kQV4DeuY6D+UjmR+Ud/0rQMBz+sUoGmH4G/MkIRTqGEbp Kpo0iGU3WHcBYEjJmHV9fd3EGCnCLVaUig6jy3DMCoS584HhI9CrQbABiD1WZpfK FyzQjZ3kqLypNqnzAOhsDSu+K+0av3imCmfCMrLC745KDo/IKK+kSZ4rc9MCAwEA AQKCAgB7EgfYO24dqirb7Df+9Lp4pgcoUrEzlfLdjIjSs/wKpvl2cGS9VL3Zmd+Y tjctnuiF0DZF5TVaMeSRK4dLUTMVq/lYRL9OHi+CGHFGAGV3c5WFqA33Abevtvyi FgpWnuvPvnv2ruOvMmvDVcTne3qDrXjC2Gz7OmE9CphFKO4S/wGQ4ZJe15OSiitq 1UmrVGnxjQEtwd6PanDxhhxTSV7Wd6ubOaYdO6mNYhgYsG2j7QPnOMXFdxXg4fgW cNeoa5cqgqmS5bI2MBlolPXdpIUryt/Xfce4yttUAmp+Fa2sAhBv9xz3T90lg1y9 kIepxAN2oZTEAytOHroSNyb4OGwpKmbR0b3ddtaXoylUAGUx10wwWu/dShJFmKlk klilRZiJUoA8u77dk91akUWgweloNApGKRI8aTQfrnJT9/9yJUc98HZ2KfQRkhu7 1DzC8/MRx5EICTCWER1g6J3+1CdrEdutzDVb2/d1vNVSdNus9fpw4UodktI5B8PX eHkl78Cq/CtbhoZIWtiyfmgchOKFafzXMmup/HXTLzmiqVkFZ1z+GO1765icgaRT O9wxmEdIfjtreBkq/xrfh+08SmryTLnF0othx1E4ggCDUCvphszL8CGSucwU2b9c Q0SDBpuJePAqMuc3pmMslVf1iThVLhCEN3P2uZxLk07rCsfueQKCAQEA+hlRW65L ccyxP/y/n216IAa+U6PE1TNJ8yPWH4DAx0B+YYlVa27O5X7LeR8KpDV0u9MMVyYD 4IUakNnTzafm0MesslOed62C0yAUsfO/ktIUPlmo3UwAWthFU7SAucbg2BJZnUCs VYmyq8SO2AK+kBKg3/0CQkBYUk4LRtnCWBpIlR5dU0wy3SeveO5VPJ2iQxCwbCi6 tugvAuxJn6jB9qzKH1nnA+ibE6aIWHVv7xe2yrjL2uNqfeg/uC3S7QuKBVzjlf1M itHNK9YNnJDRE2BZUZ9ZKhA5rQaxuPnTnjvRo7f4SfNCxqdSjF6YA80g1vEarvqd DutswdWgyWIBnQKCAQEA4W51MHFr/o4tbvDcJ3whExF3BmIS2zswQWBRz0mmvZ8L 
OuP7ibd9IQZVFR88TCR3urGw8/RTQabJs2OU3w6JvcHSbKWBpyG+41Im9+S/7AqY sQFVU+3DJpAiDj+hD0J9Brux8cXTTYLLa1vaFBFZ4XbqodgBg8/hibuAmjyvwL+l jDliUdIuKDLSkGKDTRosyR+xeYZATKi0QrkvVkmFBBf0KDEQ34Ap5LdirGFxTucl 4yMwsKJYvTkCuvqDbz8gT0U4q9xdPf0oh0HkFTw1KABriWM5PMVUDAPqEHNf3UpD gGNIE8wUytw6mOktuZvCylbk2FGk3rKhlDCv0s1ILwKCAQAzSvf9ppyAhWug5PRj /S773IYyXzciRreW7zWeFmjzNI328RScyQgT11Jhz/T9wpzjXKB+SySmaoYNSS+A UTwse5c2p6Grh+F57W1xJb2XnhBazUatxEWaXBqz1SRFdb1tcy3/62Cs1mXXhqOJ N97fa4KlDZ3l+o5iqvDzIfwS16klNCoNyGXyJ458bdz7ZpT9zjFwrVXF546kudNL tG+7aKq3jVes3IrZl+EEy2kejy2FHCtrBBM2KWjQHE3TIfe3q9pdS6rkGa1LnUYD Y0wN/kezTswZrWQ20VWny00Uec5AFp4MhKCgX3a1pTzmIq7mhmJjmrCVe8E9a9Pj GX+dAoIBAQCK9ns4/IH07hHltIYRtPvydC6p+aTkDa1mb4ievikR9MuIZuc08q4o wG2MUq/pxAUmScjp6S+1ZaoqcfVgNucD2UnsfW3Z5/xyxr6Myy7fw3xyiH7f+M32 pJzJqcGP1Y2aq/M6NZdWzYy1xm49NqDwUky6rZvpdCj0rWfOLCCvMbRMJ0eLZoPb ddDE/mUwDzWSWX4einpyhwBvrRYYG8PqTRXN/MkI7Ull2wHxfzPIzsZVKOknyTvo kQ800Z5Z4lD5bJNNpc64mqj0D9MYn/+og/HGRCsRYha4PWV49JYgpFnN9+WDhqc+ kdlxTzRHmVPPRRdWnaWDPhxKbGTGDpMzAoIBAQCIq/XIbPM/EGCtxawv3SftKgi2 FEtY2O70ouOrecBK65pi78v1tKXZApyrV3YE7W5Vo8RE8YLeUQjnxzrLRusMPYCT 9XpE96AiP7kGEDtvEyIgQ2zH7bgxnwZ+SE/l3oZgnG24FaWts1Yo12qmT1z5Fixf 7yE91E6MVVEOkmfjZcNKUvOmmWiD24DPaKP3v5DUN0Cda3pPqw18ry4M13ns4Hd2 n+NKIGyTa7hIWUWABkk0SwcYnVf1dfO5TjtSj3tZpWDz39YWfc4vykH92hbprBLd gzjC8Uy+iJ824DQAWaoQCtaS4s+0TYg8hQcbWFZi4sKZTEsPzmfyorU7VV9B -----END RSA PRIVATE KEY----- limits-3.12.0/tests/tls/ca.txt000066400000000000000000000000511462017272200161650ustar00rootroot0000000000000035EF919824B8DFBE68EC6860AB34C681883BCD23 limits-3.12.0/tests/tls/client.crt000066400000000000000000000026741462017272200170460ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIEEDCCAfigAwIBAgIUNe+RmCS4375o7GhgqzTGgYg7zSIwDQYJKoZIhvcNAQEL BQAwNTETMBEGA1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUg QXV0aG9yaXR5MB4XDTIyMTIyMTIxMjIxMFoXDTMyMTIxODIxMjIxMFowKzETMBEG A1UECgwKUmVkaXMgVGVzdDEUMBIGA1UEAwwLQ2xpZW50LW9ubHkwggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC8IrReeFUYipFDBg4syd9CYCe3d/VjP+t yv2EQWFJIF4FIBbbDgccgS96JqbZdIjHy1vtxG8l1S0peUzVcQmfCpGwjCfQGvd4 i2KqAagzxJTS3dM5d1cFRHYI/TYsV84gkmW5eQL3mQ3UMG18RrDnrnx94XLLeZre +UHT2Q3nkknfYouuRwwEa4ACIituPNFZfbZweEdJuoXIgWydp/Bo1gb4UEgjGjdi aI1tZpr62qegDu+U3HXWgRlV5zGfEYnHF2hoBgutZePqTvmNsK9uK5ZYqV33PGHS 4r7pAUZFIigfSw7xlMBrWvlDl47KOL00Fe0c1nuE2A2LsSDj4qBFAgMBAAGjIjAg MAsGA1UdDwQEAwIFoDARBglghkgBhvhCAQEEBAMCB4AwDQYJKoZIhvcNAQELBQAD ggIBAJogZp3w0Xpk7Y4P7MLS9gdsHrzPWq9/GVaW8LFJseAlKRZW7MgtH8AdvTKd 2CTwYPAh0Q/0o8ue4mjofVceCPGBgBOBi6D3NE8266X2jImFt1d45e9ensl26E/i CDjNBKQKtctZp6FsjRIKoxWXGpaNQjjiQsQQOMs1VyTES9BOVVzSFk4w1z1VYSQN CGaOWYzEUX6R7FBSdocMspYt1oh9kEYqNv1pvrY4QTIes6bKpwVeBqKXxYoGFbXV Y1ViCIvIiV+GHLvLyDdcKsID1BurPCGQi1ysRoGU6pKn02iKjUrYnJ0kkTLXQp1W ZY96XpJvuERBRvY2cjHBbArOjZDKtvyjbliQ0BlXpybNtETC9nFpYo7pjfQwpv7m vyZlYYP5gC1muqw5Mx5pDSy3lpmSTcQ8RVjkjiD9oY47J6idJdW6MFJBKgf9M3Ti 6+cSe8vthABEAzhLBeHUKotLVZx+jHTzsZN5Cfmb+QiwWB3lBoZqvly5YyRg+pUh Pf6bp4e/wPAOiB4SpCPOc1D1b9tLgO0XgvVmxclbZ+JCkjTnJsPLY445GH4MnD/l 4xXUjzcm073hJalP6fprD4i95h3dF/MVlw4TobUbqCouICbdhsQ/qDTMN8LwaRAA S/+3oVZfR1O2exEUnBRAHe5zjXbur2ooQDJ3WPkqCrTy/Gx+ -----END CERTIFICATE----- limits-3.12.0/tests/tls/client.key000066400000000000000000000032131462017272200170340ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAwvCK0XnhVGIqRQwYOLMnfQmAnt3f1Yz/rcr9hEFhSSBeBSAW 2w4HHIEveiam2XSIx8tb7cRvJdUtKXlM1XEJnwqRsIwn0Br3eItiqgGoM8SU0t3T OXdXBUR2CP02LFfOIJJluXkC95kN1DBtfEaw5658feFyy3ma3vlB09kN55JJ32KL rkcMBGuAAiIrbjzRWX22cHhHSbqFyIFsnafwaNYG+FBIIxo3YmiNbWaa+tqnoA7v 
lNx11oEZVecxnxGJxxdoaAYLrWXj6k75jbCvbiuWWKld9zxh0uK+6QFGRSIoH0sO 8ZTAa1r5Q5eOyji9NBXtHNZ7hNgNi7Eg4+KgRQIDAQABAoIBADu/rFyuJaFOgTQu k9H1H1pUqkaHLM7wa4FiITgDLgVS1V5uynKTpxxWgC3JYQRvkmjibKfAuGx5HWGD 20fkYJWNkL/zBVTknwskdbYz0dEkJJVJBi+r41Eq/Z6dtZf7xaL4HBOwAnfPQozR 1wzrQdeRBHk0u5GgERbcgQ+kjH14MQaG8s76z116AKuDFDB9mjYqjQscRIJu1f0R yvw25IMJDVfgMCur4FJhoHAqjevzhF+tDpubkZ4cx+nopeZuDAey/LcyvFbnDEqS KCL0zQkIZT4OqEGKkPvIgOMPgptqnopuCaR372xi6WNU9rNTAXkrm8TT55mQ6jY3 jcwWCyECgYEA5szPp3kHFMdLkdgwWzxoTtJVewZNxn2s/7DvnipD3HVw5v2Zyh47 ayMlhHaOtH93kFnw3opXz0A4GccKq/7x4InK+bjhU02IHoqTP1a3YK06aiKc9Pvg c9ScdFaeC3m6T9F/ub3WQ3jJtdlI2W1+TgBiVAKXI70ar1vdO8+XackCgYEA2Dlh nd8nCHP4/mtaVdBp0PK8rZW10Zn2WUvP8r9GCuCnQGpI43EpBbJBtj3uwAz5sHLE 1plLEWP6L4HNx2lzPy60sLlfHya2+V0nOACK6hLiVaNPEqkR2gFdhB57x6k/UVnh qQ4NEooBvKn2SUjgQAT0Fhh659AyD1joCzcwwJ0CgYAMQWPlaK/PkqWYoPowXgzL 4q9XUQZgO9wr16FhrKzdz/6mxWCdMu3EyqD7X7BENWCL8bqwlngDMU3JUKBocak2 WxrEF+UfH0bTGFipTFmAcLJhYGUlPRq2C9wi8VaDFXunMWCu5D20Z32OgnMz2HSU f8C45UpRQ4FfPmmI+4T2wQKBgEx3lo8nYP17Th5jxtG8X/+2KIXXATL8GQQhbIwz msi0BFIihF5zIYfTL6wMoQeoTxFYtUYTdlnTGqJ8bFLOdyRsFxi8peg1X9Y2XG1Q u8/mF8cjdw5pXlfTzcewpCPLNgpGGCkaQANDbTPUg7YNIMljNxMtWyHqHhoRXhbB UOt9AoGAHCpbIxQrc27WQ9p2Vn2R9h8ABgXi7n4iMdWRzzvgoI1DDju5kPp0UHtL fB6pHRLkPebvC5STEVQuvyxgv8VqpNVhq8qFvL4c2p3+/aBf/Xd1cFOTc9oegRH6 lPOGZRQbLwH6FmkT75ztRDoO030jW3JdJfqZntCm76kEZV3r0Ck= -----END RSA PRIVATE KEY----- limits-3.12.0/tests/tls/openssl.cnf000066400000000000000000000002421462017272200172160ustar00rootroot00000000000000[ server_cert ] keyUsage = digitalSignature, keyEncipherment nsCertType = server [ client_cert ] keyUsage = digitalSignature, keyEncipherment nsCertType = client limits-3.12.0/tests/tls/redis.crt000066400000000000000000000026071462017272200166720ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIID6DCCAdACFDXvkZgkuN++aOxoYKs0xoGIO80jMA0GCSqGSIb3DQEBCwUAMDUx EzARBgNVBAoMClJlZGlzIFRlc3QxHjAcBgNVBAMMFUNlcnRpZmljYXRlIEF1dGhv cml0eTAeFw0yMjEyMjEyMTIyMTBaFw0zMjEyMTgyMTIyMTBaMCwxEzARBgNVBAoM ClJlZGlzIFRlc3QxFTATBgNVBAMMDEdlbmVyaWMtY2VydDCCASIwDQYJKoZIhvcN AQEBBQADggEPADCCAQoCggEBAOZ60/GOktff2wlrcOpt658e6RL6liIuoznJVFMP 0VIzOjciZSg9SwVSU+JpS4XQ7diCq9G/5GbW4KPyObo5HCW7lYw4ESptcrNBVD5m rQwonCCWhV78Vr+ikk87ayVJm78L+jCrZ/cFBBspy+IZqXmlBa+4NzW6wvfjCE2I kJEBBllTptmjOHZu76OLeWib39LMtdaeQtp52TUlMjVPAruQ1/81T/jhgeivsWbz klIX5IAIHZdg18KGtvZZcYzPyx/lbCNWCaAwvdPcBdR3FhCfEm06eMP9jnvbij5D t4G6YvSpHJxa5tYNTqoPLakQ8pHcK46U75C2xWg32QfnwKkCAwEAATANBgkqhkiG 9w0BAQsFAAOCAgEARGlNHbbu1Coh1Hz2F65+nLH2pehWjmeJTdEQzi/UlFiYGBXS zXYMp4F3zw7qcm89kZsbk77SN4RRg0qsx3msP8mxIJP7se5ptmxsuHmYckfRw9cv 6O9g4GyHcDnnR6SR/r66WM9Z9ZbZUZA4iiTF0Obr0XMlqcucq6yPFxAAXWKQAFWr 5DhjcgQt4WKlfLsWSMaNP5IZqtBF3/i5f92Vy7bJbZLd0oDrv7vehM3ftfvars/g A3yLwaIbfgB3C1h95Vr2byt8c6VTd+QKLJBFhP2eYORBRChubHTNfWLli7k48vsD 03rx9RFSW1edKva+Pdn6K1S+3w74ZOxumBwLFY8mjFe5f0LcCoXJJfrYDS20OMlg 6JHdJv2EFBdPv2nz1Y7puGcOj2WXOAnVjH+PC5yWILaJdd9aaUq5HxW+IIGLf1YM 94vvSNIJEFLeINhTmWJozCHUPk1Yqmh7T6FdCs10xlelUZM1GVWpIJvoESUeeBvE XT82v9Jj6LyQ15K81vGXu/8Ujjqvic5t2YCJg55hK7h02u487RhJon7mft+Zae7q Wt/j6DkfPkGnHZqBxSyAJH5hi9cyUwbJ5aGlV2u5byvfM3uQ0X2P2YX7RhGwM0Z0 zlXSeoex0UWowEkMTV1iE/ceOYQgk5iufxo8lxGfeQmglz/n6zVmtA6LNtk= -----END CERTIFICATE----- limits-3.12.0/tests/tls/redis.dh000066400000000000000000000006501462017272200164710ustar00rootroot00000000000000-----BEGIN DH PARAMETERS----- MIIBCAKCAQEApOY1TE9VNZWJrwJTIxfiW3JveP4hlfFJHN9Lc0yyJiZNzesR8hNW yXiPqwzsIi7nxRYZJ1n2RsC3iIfxpt0G8bHAGGJzye2u45E2Nw08iZRtvB/xBQv0 NfRZc/5A9TV5bX0sJdnbmQjFZZve2f/F5FkQ3MnavgqoiIR/gdrvZg146fodB2Kz 
33RahFzJLxfO4jw3+0xp5m3hFBBWo57ZDPHmNmvkxq8ykeSdBQvo6C8BCy/ZkSSr 7NSMT18Wkd4fMzzrqY9Iohow8VCFYd4J92jQbcYjPSgv1qg/aSbjzUaGfIPQ84cB pQJzulSYpEZWnBYYCwuXN1lsvOwH3gh9qwIBAg== -----END DH PARAMETERS----- limits-3.12.0/tests/tls/redis.key000066400000000000000000000032131462017272200166640ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA5nrT8Y6S19/bCWtw6m3rnx7pEvqWIi6jOclUUw/RUjM6NyJl KD1LBVJT4mlLhdDt2IKr0b/kZtbgo/I5ujkcJbuVjDgRKm1ys0FUPmatDCicIJaF XvxWv6KSTztrJUmbvwv6MKtn9wUEGynL4hmpeaUFr7g3NbrC9+MITYiQkQEGWVOm 2aM4dm7vo4t5aJvf0sy11p5C2nnZNSUyNU8Cu5DX/zVP+OGB6K+xZvOSUhfkgAgd l2DXwoa29llxjM/LH+VsI1YJoDC909wF1HcWEJ8SbTp4w/2Oe9uKPkO3gbpi9Kkc nFrm1g1Oqg8tqRDykdwrjpTvkLbFaDfZB+fAqQIDAQABAoIBAG3RmslfcNKRpHGR kyTwqTORa+cSJZU1+apgJuDJCakMJbH9nouU4PkxKC0alNS3H41pfescbv0dZ75W fXEhIytLfU9pq6w2qE+HUlfFXrELbJEOh5Ah1kfLc8FzA3Op5h8a2uRz177Y0gML YE1kgt8mZVY8BXqBecLZl6wx5QhhuBP9orMAzx0hZFEPMbYxOyNiOVE2oJpdyJ7g J6sKEzFt8c8H55XKbX9UJS4j2CYQnKNPYui+ZredXP8fv/W1kQu7mDRmzlsaUt92 IPjz1aiAPRWnMA3D3TwnYJc5+9i5nh6b+jsLzU+HDawM3GbnT7VfbstTRjIzWlzm tXu+Od0CgYEA+LBpKcwVqhmcej8h+KTBtYUtDO4688ciRdFn91T/LkxCn4jArnXQ akmz1ZbyPx3bQxDNi+33Q9+8kD6aDbFR18DYwr7uEA2y70E+fLa3hh+XoQFVbzkE 8i5BqE0/8WBGrIZTrp065S2164m0ViOeGI3SfXqfUc1XgPQtRUJAE88CgYEA7UFg jQirw9Bzg3K62pzBmeoWn7juEgAQunsKMmXaCd+meG6jE28x0xyv7J28EM8VULWG 13mj0ih0tNGIUrxzLjKrEqN7UwFdQLMFrgw0waCGdWX9Z3wI9RqlMq3PjUeaEo8C oX8Wfmu2wvG0I6y8aFYTCnBiLbVj1lfpzGA06gcCgYEAzDNmlqh6XYrcHtD6HwWB /X/WBxkSlRs9PCD/gmprgmOiDQcoV9hrgurz3H0uer8UTFRewvhqIXR0i+irbHTe ZA94rShDqDDnbWegXJ1XwqzsP4st0L78HbP3u696wsMwJPfDfMykEIzhJP+UcWqb pUPGw7vNWEkJAdz90zHcFacCgYBPW5vQ8MKLR9EreZe4gdBcDCmvGStAiBcQWRFr FBhfTxieldskKABRbfoGOihqsumdSmqk7kH2jKOky6U+j1wzaq1AGcCpXf50Iq1W ryyCS5BN+mRNVfalDVmOXwbaacXGSWGG5Tx/4sWJ5VDs0d1fRNMvnbGpOk//pP5F 5zQrxwKBgFmNciUKMooYGiXelFtpvkGSe21ApDrnDuI63dV9C3YUUCmMop4AjSzg ZXf+vGBvTqV2TAfx3043DBdmP83q6uvMFQV7c+yksiARykr67qus5fh2Yv8cFiuU +iAX9QJXRmE7uJjIqycXMSo66KtPLa2qpT8pHsbSoLAC3MOUKr84 -----END RSA PRIVATE KEY----- limits-3.12.0/tests/tls/server.crt000066400000000000000000000026741462017272200170760ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIEEDCCAfigAwIBAgIUNe+RmCS4375o7GhgqzTGgYg7zSEwDQYJKoZIhvcNAQEL BQAwNTETMBEGA1UECgwKUmVkaXMgVGVzdDEeMBwGA1UEAwwVQ2VydGlmaWNhdGUg QXV0aG9yaXR5MB4XDTIyMTIyMTIxMjIxMFoXDTMyMTIxODIxMjIxMFowKzETMBEG A1UECgwKUmVkaXMgVGVzdDEUMBIGA1UEAwwLU2VydmVyLW9ubHkwggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQComqY2T967jGODbsRZeZ56LNthT+DOt1ZR EBQCVndJJpmYWxMSLnUWO/iQ/WcHbFPabHzhaJTBX6IgUWg//eWpyzjTVP5HdrxP saC/eCn8FbWVhjVhE8MXA/FpY9KF+GJ4j/RIq6u+O3NV7tKSlfhZT9FUkR9UuAiY TnCtXiaapqboTASya81y+dbfwPezN0C2xblp+WRZSghenqO+RNvrGSBowHzuA5cG SDaAUeFc1vIBSl4F5lXpA5rjRvmzCG1/dUkL9bpAaTKmnvnYRuZVuRlzmuRyfFKN BIs9lkzGTwhVIjzcH556pS/sj8DAKCjA/cQGOqANreYxZUT6+z0LAgMBAAGjIjAg MAsGA1UdDwQEAwIFoDARBglghkgBhvhCAQEEBAMCBkAwDQYJKoZIhvcNAQELBQAD ggIBAIjEclhdIUQP4DSYUA4QO28I/X2yZ7jVyyIm4GtPthSXufmgW0EYA9PycAwc wgDQJsqQTbaS5B0MBOXJ1FI/As8ZrDJIMK9yMNpDB69P4bn8xHiB9XBUMBWCeDdh mbUOPdXEglG/xlWeWFtNO9mx4qtr/6eJnu0iFj+E4y8twdNF26WBgv9EXwlD8XPs B7WwuT9A8wnQCmbAEAjeTpvbIr0LPtPoMT8tIW5O9kaMDf+SXInwFJWfctO8nFe3 Ujpt0IYlwW96l8j4LIC6921zLbbL8jpB44xwEUMcKFUAq5HJCEOFQ9r0Ue+10xfb gzcdvsoLDxpBwYigLPhSjMDO2adcSMA1yizVmYyMgzlcnL1AfJzJYkxmIf+f9Hr7 7u/yJO0YI7Gra+2IUzUJpOuEf92c+05p25bEBXNwUVOwrBL45ZeeSxTt9bRAdlAy Hx/ykIx0a7QzshrEyZgkIGOeKGxZqb3XBH5KKNd8QSlF0jFHuzNnqmo0eX4qyjhX XZ6MJkgp2hEOqa/8dmKbmwKeqQcdNLQ2f3wdbnN7wNeOy3AEkgTUZgym0Gyksdgs KQbGWStweGS40QbCIXrJlAsMV0g03V5u0xeh/NHZ3vPk5aEy/r8sXMCmWVCqfdYc k9NY1uZkmUFlpqMRdVjEqCKUzZTxozoITlc3NxGABEeb1xcg -----END 
CERTIFICATE----- limits-3.12.0/tests/tls/server.key000066400000000000000000000032131462017272200170640ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAqJqmNk/eu4xjg27EWXmeeizbYU/gzrdWURAUAlZ3SSaZmFsT Ei51Fjv4kP1nB2xT2mx84WiUwV+iIFFoP/3lqcs401T+R3a8T7Ggv3gp/BW1lYY1 YRPDFwPxaWPShfhieI/0SKurvjtzVe7SkpX4WU/RVJEfVLgImE5wrV4mmqam6EwE smvNcvnW38D3szdAtsW5aflkWUoIXp6jvkTb6xkgaMB87gOXBkg2gFHhXNbyAUpe BeZV6QOa40b5swhtf3VJC/W6QGkypp752EbmVbkZc5rkcnxSjQSLPZZMxk8IVSI8 3B+eeqUv7I/AwCgowP3EBjqgDa3mMWVE+vs9CwIDAQABAoIBAA0xTKAjT8JNEdxO 5n45zfSxXdkXbve90Ls3Dtm/+kV3B72sIqnhiLfcqW5u5YjnKQiGRJ8+l6Up3n24 4Pdg3n+SrzeaBuuAluuFcpaIyIA5EqRIhBS8fx9l55XfgbGJ5eqZGPw2spGIBVZM AZ4GrhAO9sovMMxoOgPw31AxdcPT9eQ28ZXd9p8gCqATgFFZ4F82AA5ytyBA5YCi uElTaee0XOoLvEMzl5RqGVSmrP4+QJunGOn9YGWT6Ew8orgaxUScxvt0Bhn0Q6YG 89Vtf/JVeGUWamtRUUJHnolOTbfxAGkJ8rLCsDJPN63p4b03pX7ZbgscCBHR77uB DGBjBEECgYEA1kKuvlFea47MCesPtSZYKb6M7DDtaRfAq3ZLlbwIvZQcBvKZexbr 1SbeIqtaO1eaJ2aG7qa/YS4i0g0LmAd8gbm473qkyhbsc18KMgOoSZUAeXeBRVQK FArOVFjkxqjy1ZOGGneMvMdsQD/qbJEzY7bZm5/EW/9eiDrH4OAddY0CgYEAyXMN NcO4fZf2/6uzPLd+wQyr28cWFkPuhF6HU7KTBWw995gRID7wFIsP7lpbz09cAAac EXbM+ix8P3G+B5LpG1Q1+uduahKa0e+Wrz/8MobwzPyPEA5I4UZC9Ca/jP3fk02s Unt3+dABuRplci580ktLDWpaJpv9BKGCPUs4mvcCgYEA0Bg1pWxb2vT2oI9nUxYp vGlwbxlaC57hJuaepI/aoV2PFwRaIp1xcTTtvBhWa5o6gxfl5zWWXGk1snKvFvn2 Z/wcbKLeF9TJ04ez3D2HHegUbOSvbckgfLS+DlfD4lpGdz5vmZcQ3myFa6VoJgek NK1rjeVPDdbpVkBYzsR8eCUCgYBgkT5r33649ff5Z51sJaolcKWRnUYBNl9IJOhK PvgrSVALOGI9Su5baq4gXEBWiHxZptF5rbFk255T63pktw/3gc6+j+JmBrUgnfFl 4Q5MSlN0cD1EPRTCqKO/1jNMqwZZ3ooUoAtLPWMWEIlAyvga9uzraWf9yhjn92Qv bbec0QKBgCGdkHnmdNA8vaikLBzTW2qNQUEso4Nz6fYYn84ANRc4IVpRE9Aqzu6N uCQhBnoMbdA1uYimOz8F+bXSvEe9RxIHMZIhxKs61xcwIe/IThwSDk9xcVSBwWQq fSUpqjJOJfJNdZtNqT1Q7DE6H53J51qpPco6Kutn9NhVlPQJdGWT -----END RSA PRIVATE KEY----- limits-3.12.0/tests/utils.py000066400000000000000000000225201462017272200157560ustar00rootroot00000000000000import asyncio import contextlib import functools import math import time from typing import Optional import pytest def fixed_start(fn): @functools.wraps(fn) def __inner(*a, **k): start = time.time() while time.time() < math.ceil(start): time.sleep(0.01) return fn(*a, **k) return __inner @contextlib.contextmanager def window(delay_end: float, delay: Optional[float] = None): start = time.time() if delay is not None: while time.time() - start < delay: time.sleep(0.001) yield (int(start), int(start + delay_end)) while time.time() - start < delay_end: time.sleep(0.001) @contextlib.asynccontextmanager async def async_window(delay_end: float, delay: Optional[float] = None): start = time.time() if delay is not None: while time.time() - start < delay: await asyncio.sleep(0.001) yield (int(start), int(start + delay_end)) while time.time() - start < delay_end: await asyncio.sleep(0.001) all_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param("memory://", {}, None, id="in-memory"), pytest.param( "redis://localhost:7379", {}, pytest.lazy_fixture("redis_basic"), marks=pytest.mark.redis, id="redis_basic", ), pytest.param( "memcached://localhost:22122", {}, pytest.lazy_fixture("memcached"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached", ), pytest.param( "memcached://localhost:22122,localhost:22123", {}, pytest.lazy_fixture("memcached_cluster"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached-cluster", ), pytest.param( "redis+cluster://localhost:7001/", {}, pytest.lazy_fixture("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( 
"redis+cluster://:sekret@localhost:8400/", {}, pytest.lazy_fixture("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", }, pytest.lazy_fixture("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "redis+sentinel://localhost:26379/mymaster", {"use_replicas": False}, pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "mongodb://localhost:37017/", {}, pytest.lazy_fixture("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "etcd://localhost:2379", {}, pytest.lazy_fixture("etcd"), marks=[pytest.mark.etcd, pytest.mark.flaky], id="etcd", ), ], ) moving_window_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param("memory://", {}, None, id="in-memory"), pytest.param( "redis://localhost:7379", {}, pytest.lazy_fixture("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "redis+cluster://localhost:7001/", {}, pytest.lazy_fixture("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "redis+cluster://:sekret@localhost:8400/", {}, pytest.lazy_fixture("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", }, pytest.lazy_fixture("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "redis+sentinel://localhost:26379/mymaster", {"use_replicas": False}, pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "mongodb://localhost:37017/", {}, pytest.lazy_fixture("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), ], ) async_all_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param("async+memory://", {}, None, id="in-memory"), pytest.param( "async+redis://localhost:7379", {}, pytest.lazy_fixture("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "async+memcached://localhost:22122", {}, pytest.lazy_fixture("memcached"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached", ), pytest.param( "async+memcached://localhost:22122,localhost:22123", {}, pytest.lazy_fixture("memcached_cluster"), marks=[pytest.mark.memcached, pytest.mark.flaky], id="memcached-cluster", ), pytest.param( "async+redis+cluster://localhost:7001/", {}, pytest.lazy_fixture("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "async+redis+cluster://:sekret@localhost:8400/", {}, pytest.lazy_fixture("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "async+redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", }, pytest.lazy_fixture("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "async+redis+sentinel://localhost:26379/mymaster", {"use_replicas": False}, pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( 
"async+mongodb://localhost:37017/", {}, pytest.lazy_fixture("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), pytest.param( "async+etcd://localhost:2379", {}, pytest.lazy_fixture("etcd"), marks=[pytest.mark.etcd, pytest.mark.flaky], id="etcd", ), ], ) async_moving_window_storage = pytest.mark.parametrize( "uri, args, fixture", [ pytest.param("async+memory://", {}, None, id="in-memory"), pytest.param( "async+redis://localhost:7379", {}, pytest.lazy_fixture("redis_basic"), marks=pytest.mark.redis, id="redis", ), pytest.param( "async+redis+cluster://localhost:7001/", {}, pytest.lazy_fixture("redis_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster", ), pytest.param( "async+redis+cluster://:sekret@localhost:8400/", {}, pytest.lazy_fixture("redis_auth_cluster"), marks=pytest.mark.redis_cluster, id="redis-cluster-auth", ), pytest.param( "async+redis+cluster://localhost:8301", { "ssl": True, "ssl_cert_reqs": "required", "ssl_keyfile": "./tests/tls/client.key", "ssl_certfile": "./tests/tls/client.crt", "ssl_ca_certs": "./tests/tls/ca.crt", }, pytest.lazy_fixture("redis_ssl_cluster"), marks=pytest.mark.redis_cluster, id="redis-ssl-cluster", ), pytest.param( "async+redis+sentinel://localhost:26379/mymaster", {"use_replicas": False}, pytest.lazy_fixture("redis_sentinel"), marks=pytest.mark.redis_sentinel, id="redis-sentinel", ), pytest.param( "async+mongodb://localhost:37017/", {}, pytest.lazy_fixture("mongodb"), marks=pytest.mark.mongodb, id="mongodb", ), ], ) limits-3.12.0/versioneer.py000066400000000000000000002364341462017272200156500ustar00rootroot00000000000000# Version: 0.22 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/python-versioneer/python-versioneer * Brian Warner * License: Public Domain * Compatible with: Python 3.6, 3.7, 3.8, 3.9, 3.10 and pypy3 * [![Latest Version][pypi-image]][pypi-url] * [![Build Status][travis-image]][travis-url] This is a tool for managing a recorded version number in distutils/setuptools-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere in your $PATH * add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) * run `versioneer install` in your source tree, commit the results * Verify version information with `python setup.py version` ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. 
"myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes). The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. 
Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This section details the most significant ones. More can be found on the GitHub [issues page](https://github.com/python-versioneer/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other languages) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. 
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and testing) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to be easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## Similar projects * [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time dependency * [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of versioneer * [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools plugin ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . 
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg [pypi-url]: https://pypi.python.org/pypi/versioneer/ [travis-image]: https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer """ # pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring # pylint:disable=missing-class-docstring,too-many-branches,too-many-statements # pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error # pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with # pylint:disable=attribute-defined-outside-init,too-many-arguments import configparser import errno import functools import json import os import re import subprocess import sys from typing import Callable, Dict class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ( "Versioneer was unable to locate the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND')." ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. my_path = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(my_path)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print( "Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(my_path), versioneer_py) ) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise OSError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . 
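    # For orientation, a minimal sketch of the [versioneer] section this
    # function parses (field values are illustrative only; they mirror the
    # SAMPLE_CONFIG and CONFIG_ERROR templates further down in this file):
    #
    #   [versioneer]
    #   VCS = git
    #   style = pep440
    #   versionfile_source = src/myproject/_version.py
    #   versionfile_build = myproject/_version.py
    #   tag_prefix =
    #   parentdir_prefix = myproject-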
setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.ConfigParser() with open(setup_cfg, "r") as cfg_file: parser.read_file(cfg_file) VCS = parser.get("versioneer", "VCS") # mandatory # Dict-like interface for non-mandatory entries section = parser["versioneer"] cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = section.get("style", "") cfg.versionfile_source = section.get("versionfile_source") cfg.versionfile_build = section.get("versionfile_build") cfg.tag_prefix = section.get("tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = section.get("parentdir_prefix") cfg.verbose = section.get("verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" HANDLERS.setdefault(vcs, {})[method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen( [command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs, ) break except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode LONG_VERSION_PY[ "git" ] = r''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.22 (https://github.com/python-versioneer/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys from typing import Callable, Dict import functools def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs) break except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") MATCH_ARGS = ["--match", "%%s*" %% tag_prefix] if tag_prefix else [] # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty", "--always", "--long", *MATCH_ARGS], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver): """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). """ vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces): """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"]) else: rendered += ".post0.dev%%d" %% (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always --long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
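    # For illustration, after 'git archive' expands the export-subst keywords,
    # the assignments being grepped for look roughly like this (values are
    # hypothetical; before expansion they read "$Format:...$"):
    #   git_refnames = " (HEAD -> master, tag: 1.2)"
    #   git_full = "1076c978a8d3cfc70f408fe5974aa6c092c949ac"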
keywords = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r"\d", r): continue if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else [] # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner( GITS, ["describe", "--tags", "--dirty", "--always", "--long", *MATCH_ARGS], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: my_path = __file__ if my_path.endswith(".pyc") or my_path.endswith(".pyo"): my_path = os.path.splitext(my_path)[0] + ".py" versioneer_file = os.path.relpath(my_path) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: with open(".gitattributes", "r") as fobj: for line in fobj: if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True break except OSError: pass if not present: with open(".gitattributes", "a+") as fobj: fobj.write(f"{versionfile_source} export-subst\n") files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.22) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search( r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S ) if not mo: mo = re.search( r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S ) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces): """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver): """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or None if no post-release segment is present). """ vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces): """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%d" % (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces): """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always --long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), } class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert ( cfg.versionfile_source is not None ), "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
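    # For illustration, a successful lookup below returns a dict shaped like
    # this (values are hypothetical):
    #   {"version": "0.11+2.g1076c97",
    #    "full-revisionid": "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
    #    "dirty": False, "error": None, "date": "2022-02-02T02:02:02+0000"}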
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None, } def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(cmdclass=None): """Get the custom setuptools/distutils subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provided as an argument. """ if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to its pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/python-versioneer/python-versioneer/issues/52 cmds = {} if cmdclass is None else cmdclass.copy() # we add "version" to both distutils and setuptools try: from setuptools import Command except ImportError: from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? 
# pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? # we override different "build_py" commands for both environments if "build_py" in cmds: _build_py = cmds["build_py"] elif "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "build_ext" in cmds: _build_ext = cmds["build_ext"] elif "setuptools" in sys.modules: from setuptools.command.build_ext import build_ext as _build_ext else: from distutils.command.build_ext import build_ext as _build_ext class cmd_build_ext(_build_ext): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_ext.run(self) if self.inplace: # build_ext --inplace will only build extensions in # build/lib<..> dir with no _version.py to write to. # As in place builds will already have a _version.py # in the module dir, we do not need to write one. return # now locate _version.py in the new build/ directory and replace # it with an updated value target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_ext"] = cmd_build_ext if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write( LONG % { "DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, } ) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if "py2exe" in sys.modules: # py2exe enabled? 
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:

 [versioneer]
 VCS = git
 style = pep440
 versionfile_source = src/myproject/_version.py
 versionfile_build = myproject/_version.py
 tag_prefix =
 parentdir_prefix = myproject-

You will also need to edit your setup.py to use the results:

 import versioneer
 setup(version=versioneer.get_version(),
       cmdclass=versioneer.get_cmdclass(),  ...)

Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""

SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.

[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =

"""

OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""

INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""


def do_setup():
    """Do main VCS-independent setup function for installing Versioneer."""
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
        if isinstance(e, (OSError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1

    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(
            LONG
            % {
                "DOLLAR": "$",
                "STYLE": cfg.style,
                "TAG_PREFIX": cfg.tag_prefix,
                "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                "VERSIONFILE_SOURCE": cfg.versionfile_source,
            }
        )

    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except OSError:
            old = ""
        module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
        snippet = INIT_PY_SNIPPET.format(module)
        if OLD_SNIPPET in old:
            print(" replacing boilerplate in %s" % ipy)
            with open(ipy, "w") as f:
                f.write(old.replace(OLD_SNIPPET, snippet))
        elif snippet not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(snippet)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None

    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except OSError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(
            " appending versionfile_source ('%s') to MANIFEST.in"
            % cfg.versionfile_source
        )
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0


def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as f:
        for line in f.readlines():
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print("        cmdclass=versioneer.get_cmdclass(),  ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
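# Editor's note -- an illustrative sketch, not part of upstream Versioneer:
# for a hypothetical versionfile_source of "src/myproject/_version.py",
# do_setup() derives module == "_version", so the INIT_PY_SNIPPET it writes
# into src/myproject/__init__.py expands to:
#
#     from . import _version
#     __version__ = _version.get_versions()['version']
#
# scan_setup_py() then checks setup.py for the three integration points shown
# in CONFIG_ERROR ('import versioneer', 'versioneer.get_version()' and
# 'versioneer.get_cmdclass()') and counts a missing one as an error.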
if __name__ == "__main__":
    # guard against a bare "python versioneer.py" invocation with no
    # subcommand, which would otherwise raise an IndexError
    cmd = sys.argv[1] if len(sys.argv) > 1 else None
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
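# Editor's note -- an illustrative sketch, not part of upstream Versioneer: a
# hypothetical first-time run of the entry point above, in a project whose
# versionfile_source is "src/myproject/_version.py", would print roughly:
#
#     $ python versioneer.py setup
#      creating src/myproject/_version.py
#      appending to src/myproject/__init__.py
#      appending 'versioneer.py' to MANIFEST.in
#      appending versionfile_source ('src/myproject/_version.py') to MANIFEST.in
#
# Any error counted by do_setup() or scan_setup_py() makes the process exit
# with status 1, so automation can detect an incomplete installation.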