pax_global_header00006660000000000000000000000064126714233750014524gustar00rootroot0000000000000052 comment=11f57f58a9886d4c26ea920bd9507d1b5f4769c5 flask-limiter-0.9.3/000077500000000000000000000000001267142337500143005ustar00rootroot00000000000000flask-limiter-0.9.3/.codeclimate.yml000066400000000000000000000001131267142337500173450ustar00rootroot00000000000000exclude_paths: - versioneer.py - flask_limiter/_version.py - tests/* flask-limiter-0.9.3/.coveragerc000066400000000000000000000002741267142337500164240ustar00rootroot00000000000000[run] include = **/flask_limiter/* omit = /*/flask_limiter/_version* /*/flask_limiter/backports/* [report] exclude_lines = pragma: no cover raise NotImplementedError flask-limiter-0.9.3/.gitattributes000066400000000000000000000000521267142337500171700ustar00rootroot00000000000000flask_ratelimits/_version.py export-subst flask-limiter-0.9.3/.gitignore000066400000000000000000000001341267142337500162660ustar00rootroot00000000000000*.pyc *.log cover/* .coverage* .test_env .idea build/ dist/ htmlcov *egg-info* .cache .eggs flask-limiter-0.9.3/.gitmodules000066400000000000000000000001661267142337500164600ustar00rootroot00000000000000[submodule "doc/source/_themes"] path = doc/source/_themes url = git://github.com/mitsuhiko/flask-sphinx-themes.git flask-limiter-0.9.3/.landscape.yml000066400000000000000000000003611267142337500170330ustar00rootroot00000000000000doc-warnings: no test-warnings: no strictness: veryhigh max-line-length: 80 autodetect: yes requirements: - requirements/main.txt ignore-paths: - tests - doc ignore-patterns: - versioneer.py - flask_limiter/_versions.py flask-limiter-0.9.3/.travis.yml000066400000000000000000000004111267142337500164050ustar00rootroot00000000000000sudo: false language: python python: - "2.6" - "2.7" - "3.3" - "3.4" - "pypy" install: - pip install -r requirements/ci.txt services: - redis-server - memcached script: nosetests tests --with-cov -v after_success: - coveralls flask-limiter-0.9.3/CLASSIFIERS000066400000000000000000000007151267142337500157750ustar00rootroot00000000000000Development Status :: 4 - Beta Environment :: Web Environment Intended Audience :: Developers License :: OSI Approved :: MIT License Operating System :: MacOS Operating System :: POSIX :: Linux Operating System :: OS Independent Topic :: Software Development :: Libraries :: Python Modules Programming Language :: Python :: 2.6 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3.3 Programming Language :: Python :: Implementation :: PyPy flask-limiter-0.9.3/CONTRIBUTIONS.rst000066400000000000000000000005721267142337500171000ustar00rootroot00000000000000Contributions ============= * `Timothee Groleau `_ * `Zehua Liu `_ * `Guilherme Polo `_ * `Mattias Granlund `_ * `Josh Friend `_ * `Sami Hiltunen `_ * `Henning Peters `_ flask-limiter-0.9.3/HISTORY.rst000066400000000000000000000122371267142337500162000ustar00rootroot00000000000000.. :changelog: Changelog ========= 0.9.3 2016-03-14 ---------------- * Allow `reset` of limiter storage if available 0.9.2 2016-03-04 ---------------- * Deprecation warning for default `key_func` `get_ipaddr` * Support for `Retry-After` header 0.9.1 2015-11-21 ---------------- * Re-expose `enabled` property on `Limiter` instance. 0.9 2015-11-13 -------------- * In-memory fallback option for unresponsive storage * Rate limit exemption option per limit 0.8.5 2015-10-05 ---------------- * Bug fix for reported issues of missing (limits) dependency upon installation. 
0.8.4 2015-10-03 ---------------- * Documentation tweaks. 0.8.2 2015-09-17 ---------------- * Remove outdated files from egg 0.8.1 2015-08-06 ---------------- * Fixed compatibility with latest version of **Flask-Restful** 0.8 2015-06-07 -------------- * No functional change 0.7.9 2015-04-02 ---------------- * Bug fix for case sensitive `methods` whitelist for `limits` decorator 0.7.8 2015-03-20 ---------------- * Hotfix for dynamic limits with blueprints * Undocumented feature to pass storage options to underlying storage backend. 0.7.6 2015-03-02 ---------------- * `methods` keyword argument for `limits` decorator to specify specific http methods to apply the rate limit to. 0.7.5 2015-02-16 ---------------- * `Custom error messages `_. 0.7.4 2015-02-03 ---------------- * Use Werkzeug TooManyRequests as the exception raised when available. 0.7.3 2015-01-30 ---------------- * Bug Fix * Fix for version comparison when monkey patching Werkzeug (`Issue 24 `_) 0.7.1 2015-01-09 ---------------- * Refactor core storage & ratelimiting strategy out into the `limits `_ package. * Remove duplicate hits when stacked rate limits are in use and a rate limit is hit. 0.7 2015-01-09 -------------- * Refactoring of RedisStorage for extensibility (`Issue 18 `_) * Bug fix: Correct default setting for enabling rate limit headers. (`Issue 22 `_) 0.6.6 2014-10-21 ---------------- * Bug fix * Fix for responses slower than rate limiting window. (`Issue 17 `_.) 0.6.5 2014-10-01 ---------------- * Bug fix: in memory storage thread safety 0.6.4 2014-08-31 ---------------- * Support for manually triggering rate limit check 0.6.3 2014-08-26 ---------------- * Header name overrides 0.6.2 2014-07-13 ---------------- * `Rate limiting for blueprints `_ 0.6.1 2014-07-11 ---------------- * per http method rate limit separation (`Recipe `_) * documentation improvements 0.6 2014-06-24 -------------- * `Shared limits between routes `_ 0.5 2014-06-13 -------------- * `Request Filters `_ 0.4.4 2014-06-13 ---------------- * Bug fix * Werkzeug < 0.9 Compatibility (`Issue 6 `_.) 0.4.3 2014-06-12 ---------------- * Hotfix : use HTTPException instead of abort to play well with other extensions. 0.4.2 2014-06-12 ---------------- * Allow configuration overrides via extension constructor 0.4.1 2014-06-04 ---------------- * Improved implementation of moving-window X-RateLimit-Reset value. 0.4 2014-05-28 -------------- * `Rate limiting headers `_ 0.3.2 2014-05-26 ---------------- * Bug fix * Memory leak when using ``Limiter.storage.MemoryStorage`` (`Issue 4 `_.) * Improved test coverage 0.3.1 2014-02-20 ---------------- * Strict version requirement on six * documentation tweaks 0.3.0 2014-02-19 ---------------- * improved logging support for multiple handlers * allow callables to be passed to ``Limiter.limit`` decorator to dynamically load rate limit strings. * add a global kill switch in flask config for all rate limits. * Bug fixes * default key function for rate limit domain wasn't accounting for X-Forwarded-For header. 0.2.2 2014-02-18 ---------------- * add new decorator to exempt routes from limiting. * Bug fixes * versioneer.py wasn't included in manifest. * configuration string for strategy was out of sync with docs. 0.2.1 2014-02-15 ---------------- * python 2.6 support via counter backport * source docs. 0.2 2014-02-15 -------------- * Implemented configurable strategies for rate limiting. 
* Bug fixes * better locking for in-memory storage * multi threading support for memcached storage 0.1.1 2014-02-14 ---------------- * Bug fixes * fix initializing the extension without an app * don't rate limit static files 0.1.0 2014-02-13 ---------------- * first release. flask-limiter-0.9.3/LICENSE.txt000066400000000000000000000020461267142337500161250ustar00rootroot00000000000000Copyright (c) 2014 Ali-Akber Saifee Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. flask-limiter-0.9.3/MANIFEST.in000066400000000000000000000003461267142337500160410ustar00rootroot00000000000000include README.rst include LICENSE.txt include HISTORY.rst include CONTRIBUTIONS.rst include CLASSIFIERS include versioneer.py recursive-include requirements *.txt recursive-include doc/source * recursive-include doc *.py Make* flask-limiter-0.9.3/README.rst000066400000000000000000000056661267142337500160040ustar00rootroot00000000000000.. |travis-ci| image:: https://img.shields.io/travis/alisaifee/flask-limiter/master.svg?style=flat-square :target: https://travis-ci.org/#!/alisaifee/flask-limiter?branch=master .. |coveralls| image:: https://img.shields.io/coveralls/alisaifee/flask-limiter/master.svg?style=flat-square :target: https://coveralls.io/r/alisaifee/flask-limiter?branch=master .. |pypi| image:: https://img.shields.io/pypi/v/Flask-Limiter.svg?style=flat-square :target: https://pypi.python.org/pypi/Flask-Limiter .. |license| image:: https://img.shields.io/pypi/l/Flask-Limiter.svg?style=flat-square :target: https://pypi.python.org/pypi/Flask-Limiter .. |landscape| image:: https://landscape.io/github/alisaifee/flask-limiter/master/landscape.svg?style=flat-square :target: https://landscape.io/github/alisaifee/flask-limiter/master .. |gitter| image:: https://img.shields.io/badge/gitter-join%20chat-blue.svg?style=flat-square :alt: Join the chat at https://gitter.im/alisaifee/flask-limiter :target: https://gitter.im/alisaifee/flask-limiter?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge ************* Flask-Limiter ************* |travis-ci| |coveralls| |landscape| |pypi| |gitter| |license| Flask-Limiter provides rate limiting features to flask routes. It has support for a configurable backend for storage with current implementations for in-memory, redis and memcache. Quickstart =========== Add the rate limiter to your flask app. The following example uses the default in memory implementation for storage. .. 
code-block:: python

    from flask import Flask
    from flask_limiter import Limiter
    from flask_limiter.util import get_remote_address

    app = Flask(__name__)
    limiter = Limiter(
        app,
        key_func=get_remote_address,
        global_limits=["2 per minute", "1 per second"],
    )

    @app.route("/slow")
    @limiter.limit("1 per day")
    def slow():
        return "24"

    @app.route("/fast")
    def fast():
        return "42"

    @app.route("/ping")
    @limiter.exempt
    def ping():
        return 'PONG'

    app.run()

Test it out. The ``fast`` endpoint respects the global rate limit while the ``slow`` endpoint uses the decorated one. ``ping`` has no rate limit associated with it.

.. code-block:: bash

    $ curl localhost:5000/fast
    42
    $ curl localhost:5000/fast
    42
    $ curl localhost:5000/fast
    429 Too Many Requests

    Too Many Requests
    2 per 1 minute
    $ curl localhost:5000/slow
    24
    $ curl localhost:5000/slow
    429 Too Many Requests
    Too Many Requests
    1 per 1 day

$ curl localhost:5000/ping PONG $ curl localhost:5000/ping PONG $ curl localhost:5000/ping PONG $ curl localhost:5000/ping PONG `Read the docs `_ flask-limiter-0.9.3/doc/000077500000000000000000000000001267142337500150455ustar00rootroot00000000000000flask-limiter-0.9.3/doc/Makefile000066400000000000000000000152271267142337500165140ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Flask-Ratelimit.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Flask-Ratelimit.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Flask-Ratelimit" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Flask-Ratelimit" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
flask-limiter-0.9.3/doc/source/000077500000000000000000000000001267142337500163455ustar00rootroot00000000000000flask-limiter-0.9.3/doc/source/_static/000077500000000000000000000000001267142337500177735ustar00rootroot00000000000000flask-limiter-0.9.3/doc/source/_static/limiter.css000066400000000000000000000006671267142337500221630ustar00rootroot00000000000000@import url("flasky.css"); div.warning, div.attention{ background-color: #ffedcc; } div.danger { background-color: #fdf3f2; } div.info, div.note { background-color: #e7f2fa; } div.tip, div.important { background-color: #dbfaf4; } div.alert { background-color: #ffedcc; } div.admonition{ border: none; } div.admonition p.admonition-title{ font-variant: small-caps; } p.admonition-title:after{ content: ""; } flask-limiter-0.9.3/doc/source/_static/logo.png000066400000000000000000000632461267142337500214540ustar00rootroot00000000000000PNG  IHDRnx6| AiCCPICC ProfileH wTSϽ7" %z ;HQIP&vDF)VdTG"cE b PQDE݌k 5ޚYg}׺PtX4X\XffGD=HƳ.d,P&s"7C$ E6<~&S2)212 "įl+ɘ&Y4Pޚ%ᣌ\%g|eTI(L0_&l2E9r9hxgIbטifSb1+MxL 0oE%YmhYh~S=zU&ϞAYl/$ZUm@O ޜl^ ' lsk.+7oʿ9V;?#I3eE妧KD d9i,UQ h A1vjpԁzN6p\W p G@ K0ށiABZyCAP8C@&*CP=#t] 4}a ٰ;GDxJ>,_“@FXDBX$!k"EHqaYbVabJ0՘cVL6f3bձX'?v 6-V``[a;p~\2n5׌ &x*sb|! ߏƿ' Zk! $l$T4QOt"y\b)AI&NI$R$)TIj"]&=&!:dGrY@^O$ _%?P(&OJEBN9J@y@yCR nXZOD}J}/G3ɭk{%Oחw_.'_!JQ@SVF=IEbbbb5Q%O@%!BӥyҸM:e0G7ӓ e%e[(R0`3R46i^)*n*|"fLUo՝mO0j&jajj.ϧwϝ_4갺zj=U45nɚ4ǴhZ ZZ^0Tf%9->ݫ=cXgN].[7A\SwBOK/X/_Q>QG[ `Aaac#*Z;8cq>[&IIMST`ϴ kh&45ǢYYF֠9<|y+ =X_,,S-,Y)YXmĚk]c}džjcΦ浭-v};]N"&1=xtv(}'{'IߝY) Σ -rqr.d._xpUەZM׍vm=+KGǔ ^WWbj>:>>>v}/avO8 FV> 2 u/_$\BCv< 5 ]s.,4&yUx~xw-bEDCĻHGKwFGEGME{EEKX,YFZ ={$vrK .3\rϮ_Yq*©L_wד+]eD]cIIIOAu_䩔)3ѩiB%a+]3='/40CiU@ёL(sYfLH$%Y jgGeQn~5f5wugv5k֮\۹Nw]m mHFˍenQQ`hBBQ-[lllfjۗ"^bO%ܒY}WwvwXbY^Ю]WVa[q`id2JjGէ{׿m>PkAma꺿g_DHGGu;776ƱqoC{P38!9 ҝˁ^r۽Ug9];}}_~imp㭎}]/}.{^=}^?z8hc' O*?f`ϳgC/Oϩ+FFGGόzˌㅿ)ѫ~wgbk?Jި9mdwi獵ޫ?cǑOO?w| x&mf2:Y~ pHYs  iTXtXML:com.adobe.xmp 2014-02-12T09:02:29 Pixelmator 3.1 1 72 5 1 72 1331 1 182 c@IDATxELJ ԇH齚@D@Bh"R"R I@tEvw˾ͽwI{wgggv9gLUGpGpGprr9#8#8#yGpGpGpG rGpGpGp#8#8#89G78#8#8+nGpGpG9弁=GpGpGp\q>8#8#8@p- 9#8#8#GpGpGpr+n9o gpGpGpWܼ8#8#8#s\qy9{#8#8#}pGpGp#[sGpGpG7#8#8#Wr@Ξ#8#8#8ypGpGpG rGpGpGp#8#8#89G78#8#8+n"P(ZXU+>"GpGUL$[3`cqπ|+nX4rdΞ#8#D\qn)dӛ_O>$|afc~{ET!6jɓ'i6L7t͏#8#Wr D! %(?§|}c_~9=|T}cNru %/bx¿0qp饗oS⣏>s)n5!@`|w_k ^x!qDʐGNn#̥m{m{#PU' ~{nMɩ=ڧ?ae lMg}f?ĉ›o^{$ie9SO=)_̺o(|S2?6wy'z衁҆3K:kWb}~ꫯz,PZ3c-53ӑw~x#f8_"ۗX& [ǒskJF2Mk>Gy[m,۬K3ɵg9K6sC/}f6ca?H_E@m\׿o}[nehNkH>hFw 7^8,BP}or-8ދRsN3ϴ榛nw^{o2Aվrk.:mO>d7n\83I5b+m#Ft+6tP\`I*_~4\q~ri#?bW^y%<#{ =\Wjgo|^(T|5\6 #Sv)A 7H\=q{P_BڶڄryZi=~@cMnխɁ;@.̾#U"Q&1XQn^{J.57tP3MHHqSJֳB}[o>_᧜rJXÁ1"6G}t` LyP.e5 [onam+0{JX-a.*Lj3>V@^:?ǢUm(guQYjP]5D`G}4 {Q.I %ebw!Sx˱ɟv%?H>(LWV^,c} ^{ms=D 9v,@[r7v>E&xTZ !\uQV˽ٴ`&M 'xbryU f~A]v&kL`_iDԤp79}9CɟPV:묒˝{K,ari8ϳaT'ˢ} EU7<"W%[{o;vlӪ*(X0+٧V!@  vmf2~Dt}7? ZBBCczW\.r˂A;)k$ON@-Z8˰_D!~GAAW8p/4E(iVPǏONߛNmB yE7m_|ލA6\E!I' #( (tO6g}:N~G3:}=_E> Vv: կ1;sQyL6lz=3VVtc.3X@'p׏1*|7/Z[߃fe_xO,;Wh'(=X/*3(gSbaFm˾GwϺ+uST֒}\mIԟJ.7N4mz˅mu8QhY6qrO%җ)\s5h|eXm ѽO.:ᒢfeX ,kHkpq-Dz>0(?0Ō"A k*;UG":-R?gԃb/C'ͨ=cE_֟b!,fm)n^G=V#&4ܾ` AoYaX *b[XJya-jD?Z|Z:}oN& Q0ý֜tˍ(H@G?S>MZkY^wGǚנԨ=#K, `RX%EUY+cY.KM4nY9 Jx)mH}D*VEd]t>뭼XFE".ʣHQOJN~/.첶^5XA bdKC<#I hŁeM>?TYFeXӎ@]ҧʸƛJ ,>Q+$a-MT.D kރ0j&6)6 ` R5.ZR+2PXV>{K_-}5Pȗ}I'-(p<,"-C5|zGSpŭSZɋ[9|oI@Yl¨QlM Fַv: ~M[|6]FPw[_.  
flask-limiter-0.9.3/doc/source/_static/logo.png, flask-limiter-0.9.3/doc/source/_static/tap-logo.png: [binary PNG image data omitted]
flask-limiter-0.9.3/doc/source/_templates/sidebarintro.html

About

Flask-Limiter provides rate limiting features for Flask routes. It supports a configurable storage backend, with current implementations for in-memory, Redis, and Memcached.
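As a concrete illustration of the configurable storage backend, the limiter can be pointed at Redis instead of the default in-memory store through Flask configuration. The snippet below is a minimal sketch only: it assumes a Redis server reachable at localhost:6379 and the `redis` client package installed.

.. code-block:: python

    from flask import Flask
    from flask_limiter import Limiter
    from flask_limiter.util import get_remote_address

    app = Flask(__name__)
    # Assumption: a Redis server is running on localhost:6379 and the `redis`
    # package is installed. Omit this setting to keep the default in-memory
    # storage (memory://).
    app.config["RATELIMIT_STORAGE_URL"] = "redis://localhost:6379"

    limiter = Limiter(app, key_func=get_remote_address)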

Useful Links

flask-limiter-0.9.3/doc/source/_themes/000077500000000000000000000000001267142337500177715ustar00rootroot00000000000000flask-limiter-0.9.3/doc/source/conf.py000066400000000000000000000033311267142337500176440ustar00rootroot00000000000000# -*- coding: utf-8 -*- # import sys import os sys.path.insert(0, os.path.abspath('../../')) sys.path.append(os.path.abspath('_themes')) import flask_limiter extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'Flask-Limiter' copyright = u'2014, Ali-Akber Saifee' version = release = flask_limiter.__version__ exclude_patterns = [] pygments_style = 'sphinx' html_theme_options = { "index_logo": "logo.png" } html_theme_path = ["_themes"] html_theme = 'flask' html_static_path = ['_static'] html_style = 'limiter.css' htmlhelp_basename = 'Flask-Ratelimitdoc' html_logo = 'tap-logo.png' html_favicon = 'tap-icon.png' html_sidebars = { 'index': ['sidebarintro.html', 'localtoc.html', 'sourcelink.html', 'searchbox.html'], '**': ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] } latex_documents = [ ('index', 'Flask-Limiter.tex', u'Flask-Limiter Documentation', u'Ali-Akber Saifee', 'manual'), ] man_pages = [ ('index', 'flask-limiter', u'Flask-Limiter Documentation', [u'Ali-Akber Saifee'], 1) ] texinfo_documents = [ ('index', 'Flask-Limiter', u'Flask-Limiter Documentation', u'Ali-Akber Saifee', 'Flask-Limiter', 'One line description of project.', 'Miscellaneous'), ] intersphinx_mapping = {'python': ('http://docs.python.org/', None) , 'flask': ("http://flask.pocoo.org/docs/", None) , 'limits': ("http://limits.readthedocs.org/en/latest/", None) , 'flaskrestful': ('http://flask-restful.readthedocs.org/en/latest/', None) } autodoc_default_flags = [ "members" , "show-inheritance" ] flask-limiter-0.9.3/doc/source/index.rst000066400000000000000000000551271267142337500202200ustar00rootroot00000000000000.. _pymemcache: https://pypi.python.org/pypi/pymemcache .. _redis: https://pypi.python.org/pypi/redis .. _github issue #41: https://github.com/alisaifee/flask-limiter/issues/41 .. _flask apps and ip spoofing: http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html .. _RFC2616: https://tools.ietf.org/html/rfc2616#section-14.37 ************* Flask-Limiter ************* .. currentmodule:: flask_limiter Usage ===== Quick start ----------- .. code-block:: python from flask import Flask from flask_limiter import Limiter from flask_limiter.util import get_remote_address app = Flask(__name__) limiter = Limiter( app, key_func=get_remote_address, global_limits=["200 per day", "50 per hour"]) ) @app.route("/slow") @limiter.limit("1 per day") def slow(): return "24" @app.route("/fast") def fast(): return "42" @app.route("/ping") @limiter.exempt def ping(): return "PONG" The above Flask app will have the following rate limiting characteristics: * Rate limiting by `remote_address` of the request * A global rate limit of 200 per day, and 50 per hour applied to all routes. * The ``slow`` route having an explicit rate limit decorator will bypass the global rate limit and only allow 1 request per day. * The ``ping`` route will be exempt from any global rate limits. .. note:: The built in flask static files routes are also exempt from rate limits. Every time a request exceeds the rate limit, the view function will not get called and instead a `429 `_ http error will be raised. 
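A quick way to observe the ``429`` behaviour without running a server is Flask's built-in test client. The snippet below is a minimal sketch, assuming the quickstart application above is importable as ``app`` and uses the default in-memory storage.

.. code-block:: python

    # Exercise the quickstart app with Flask's test client.
    with app.test_client() as client:
        assert client.get("/slow").status_code == 200
        # A second request exceeds the "1 per day" limit on /slow.
        assert client.get("/slow").status_code == 429
        # /ping is exempt and is never rate limited.
        assert client.get("/ping").status_code == 200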
The Flask-Limiter extension --------------------------- The extension can be initialized with the :class:`flask.Flask` application in the usual ways. Using the constructor .. code-block:: python from flask_limiter import Limiter from flask_limiter.util import get_remote_address .... limiter = Limiter(app, key_func=get_remote_address) Using ``init_app`` .. code-block:: python limiter = Limiter(key_func=get_remote_address) limiter.init_app(app) .. _ratelimit-domain: Rate Limit Domain ----------------- Each :class:`Limiter` instance is initialized with a `key_func` which returns the bucket in which each request is put into when evaluating whether it is within the rate limit or not. .. danger:: Earlier versions of Flask-Limiter defaulted the rate limiting domain to the requesting users' ip-address retreived via the :func:`flask_limiter.util.get_ipaddr` function. This behavior is being deprecated (since version `0.9.2`) as it can be susceptible to ip spoofing with certain environment setups (more details at `github issue #41`_ & `flask apps and ip spoofing`_). It is now recommended to explicitly provide a keying function as part of the :class:`Limiter` initialization (:ref:`keyfunc-customization`). Two utility methods are still provided: * :func:`flask_limiter.util.get_ipaddr`: uses the last ip address in the `X-Forwarded-For` header, else falls back to the `remote_address` of the request * :func:`flask_limiter.util.get_remote_address`: uses the `remote_address` of the request. Decorators ---------- The decorators made available as instance methods of the :class:`Limiter` instance are .. _ratelimit-decorator-limit: :meth:`Limiter.limit` There are a few ways of using this decorator depending on your preference and use-case. Single decorator The limit string can be a single limit or a delimiter separated string .. code-block:: python @app.route("....") @limiter.limit("100/day;10/hour;1/minute") def my_route() ... Multiple decorators The limit string can be a single limit or a delimiter separated string or a combination of both. .. code-block:: python @app.route("....") @limiter.limit("100/day") @limiter.limit("10/hour") @limiter.limit("1/minute") def my_route(): ... Custom keying function By default rate limits are applied based on the key function that the :class:`Limiter` instance was initialized with. You can implement your own function to retrieve the key to rate limit by when decorating individual routes. Take a look at :ref:`keyfunc-customization` for some examples.. .. code-block:: python def my_key_func(): ... @app.route("...") @limiter.limit("100/day", my_key_func) def my_route(): ... .. note:: The key function is called from within a :ref:`flask request context `. Dynamically loaded limit string(s) There may be situations where the rate limits need to be retrieved from sources external to the code (database, remote api, etc...). This can be achieved by providing a callable to the decorator. .. code-block:: python def rate_limit_from_config(): return current_app.config.get("CUSTOM_LIMIT", "10/s") @app.route("...") @limiter.limit(rate_limit_from_config) def my_route(): ... .. danger:: The provided callable will be called for every request on the decorated route. For expensive retrievals, consider caching the response. .. note:: The callable is called from within a :ref:`flask request context `. Exemption conditions Each limit can be exempted when given conditions are fulfilled. 
These conditions can be specified by supplying a callable as an ```exempt_when``` argument when defining the limit. .. code-block:: python @app.route("/expensive") @limiter.limit("100/day", exempt_when=lambda: current_user.is_admin) def expensive_route(): ... .. _ratelimit-decorator-shared-limit: :meth:`Limiter.shared_limit` For scenarios where a rate limit should be shared by multiple routes (For example when you want to protect routes using the same resource with an umbrella rate limit). Named shared limit .. code-block:: python mysql_limit = limiter.shared_limit("100/hour", scope="mysql") @app.route("..") @mysql_limit def r1(): ... @app.route("..") @mysql_limit def r2(): ... Dynamic shared limit: when a callable is passed as scope, the return value of the function will be used as the scope. .. code-block:: python def host_scope(): return request.host host_limit = limiter.shared_limit("100/hour", scope=host_scope) @app.route("..") @host_limit def r1(): ... @app.route("..") @host_limit def r2(): ... .. note:: Shared rate limits provide the same conveniences as individual rate limits * Can be chained with other shared limits or individual limits * Accept keying functions * Accept callables to determine the rate limit value .. _ratelimit-decorator-exempt: :meth:`Limiter.exempt` This decorator simply marks a route as being exempt from any rate limits. .. _ratelimit-decorator-request-filter: :meth:`Limiter.request_filter` This decorator simply marks a function as a filter for requests that are going to be tested for rate limits. If any of the request filters return ``True`` no rate limiting will be performed for that request. This mechanism can be used to create custom white lists. .. code-block:: python @limiter.request_filter def header_whitelist(): return request.headers.get("X-Internal", "") == "true" @limiter.request_filter def ip_whitelist(): return request.remote_addr == "127.0.0.1" In the above example, any request that contains the header ``X-Internal: true`` or originates from localhost will not be rate limited. .. _ratelimit-conf: Configuration ============= The following flask configuration values are honored by :class:`Limiter`. If the corresponding configuration is passed in to the :class:`Limiter` constructor, those will take precedence. .. tabularcolumns:: |p{6.5cm}|p{8.5cm}| ===================================== ================================================ ``RATELIMIT_GLOBAL`` A comma (or some other delimiter) separated string that will be used to apply a global limit on all routes. If not provided, the global limits can be passed to the :class:`Limiter` constructor as well (the values passed to the constructor take precedence over those in the config). :ref:`ratelimit-string` for details. ``RATELIMIT_STORAGE_URL`` One of ``memory://`` or ``redis://host:port`` or ``memcached://host:port``. Using the redis storage requires the installation of the `redis`_ package while memcached relies on the `pymemcache`_ package. (For details refer to :ref:`storage-scheme`) ``RATELIMIT_STORAGE_OPTIONS`` A dictionary to set extra options to be passed to the storage implementation upon initialization. (Useful if you're subclassing :class:`limits.storage.Storage` to create a custom Storage backend.) ``RATELIMIT_STRATEGY`` The rate limiting strategy to use. :ref:`ratelimit-strategy` for details. ``RATELIMIT_HEADERS_ENABLED`` Enables returning :ref:`ratelimit-headers`. Defaults to ``False`` ``RATELIMIT_ENABLED`` Overall kill switch for rate limits. 
Defaults to ``True`` ``RATELIMIT_HEADER_LIMIT`` Header for the current rate limit. Defaults to ``X-RateLimit-Limit`` ``RATELIMIT_HEADER_RESET`` Header for the reset time of the current rate limit. Defaults to ``X-RateLimit-Reset`` ``RATELIMIT_HEADER_REMAINING`` Header for the number of requests remaining in the current rate limit. Defaults to ``X-RateLimit-Remaining`` ``RATELIMIT_HEADER_REMAINING`` Header for when the client should retry the request. Defaults to ``Retry-After`` ``RATELIMIT_HEADER_REMAINING_VALUE`` Allows configuration of how the value of the `Retry-After` header is rendered. One of `http-date` or `delta-seconds`. (`RFC2616`_). ``RATELIMIT_SWALLOW_ERRORS`` Whether to allow failures while attempting to perform a rate limit such as errors with downstream storage. Setting this value to ``True`` will effectively disable rate limiting for requests where an error has occurred. ``RATELIMIT_IN_MEMORY_FALLBACK`` A comma (or some other delimiter) separated string that will be used when the configured storage is down. ===================================== ================================================ .. _ratelimit-string: Rate limit string notation ========================== Rate limits are specified as strings following the format: [count] [per|/] [n (optional)] [second|minute|hour|day|month|year] You can combine multiple rate limits by separating them with a delimiter of your choice. Examples -------- * 10 per hour * 10/hour * 10/hour;100/day;2000 per year * 100/day, 500/7days .. warning:: If rate limit strings that are provided to the :meth:`Limiter.limit` decorator are malformed and can't be parsed the decorated route will fall back to the global rate limit(s) and an ``ERROR`` log message will be emitted. Refer to :ref:`logging` for more details on capturing this information. Malformed global rate limit strings will however raise an exception as they are evaluated early enough to not cause disruption to a running application. .. _ratelimit-strategy: Rate limiting strategies ======================== Flask-Limiter comes with three different rate limiting strategies built-in. Pick the one that works for your use-case by specifying it in your flask config as ``RATELIMIT_STRATEGY`` (one of ``fixed-window``, ``fixed-window-elastic-expiry``, or ``moving-window``), or as a constructor keyword argument. The default configuration is ``fixed-window``. Fixed Window ------------ This is the most memory efficient strategy to use as it maintains one counter per resource and rate limit. It does however have its drawbacks as it allows bursts within each window - thus allowing an 'attacker' to by-pass the limits. The effects of these bursts can be partially circumvented by enforcing multiple granularities of windows per resource. For example, if you specify a ``100/minute`` rate limit on a route, this strategy will allow 100 hits in the last second of one window and a 100 more in the first second of the next window. To ensure that such bursts are managed, you could add a second rate limit of ``2/second`` on the same route. Fixed Window with Elastic Expiry -------------------------------- This strategy works almost identically to the Fixed Window strategy with the exception that each hit results in the extension of the window. This strategy works well for creating large penalties for breaching a rate limit. 
For example, if you specify a ``100/minute`` rate limit on a route and it is being attacked at the rate of 5 hits per second for 2 minutes - the attacker will be locked out of the resource for an extra 60 seconds after the last hit. This strategy helps circumvent bursts. Moving Window ------------- .. warning:: The moving window strategy is only implemented for the ``redis`` and ``in-memory`` storage backends. The strategy requires using a list with fast random access which is not very convenient to implement with a memcached storage. This strategy is the most effective for preventing bursts from by-passing the rate limit as the window for each limit is not fixed at the start and end of each time unit (i.e. N/second for a moving window means N in the last 1000 milliseconds). There is however a higher memory cost associated with this strategy as it requires ``N`` items to be maintained in memory per resource and rate limit. .. _ratelimit-headers: Rate-limiting Headers ===================== If the configuration is enabled, information about the rate limit with respect to the route being requested will be added to the response headers. Since multiple rate limits can be active for a given route - the rate limit with the lowest time granularity will be used in the scenario when the request does not breach any rate limits. .. tabularcolumns:: |p{8cm}|p{8.5cm}| ============================== ================================================ ``X-RateLimit-Limit`` The total number of requests allowed for the active window ``X-RateLimit-Remaining`` The number of requests remaining in the active window. ``X-RateLimit-Reset`` UTC seconds since epoch when the window will be reset. ``Retry-After`` Seconds to retry after or the http date when the Rate Limit will be reset. The way the value is presented depends on the configuration value set in `RATELIMIT_HEADER_REMAINING_VALUE` and defaults to `delta-seconds`. ============================== ================================================ .. warning:: Enabling the headers has an additional cost with certain storage / strategy combinations. * Memcached + Fixed Window: an extra key per rate limit is stored to calculate ``X-RateLimit-Reset`` * Redis + Moving Window: an extra call to redis is involved during every request to calculate ``X-RateLimit-Remaining`` and ``X-RateLimit-Reset`` The header names can be customised if required by either using the flask configuration (:ref:`ratelimit-conf`) values or by setting the ``header_mapping`` property of the :class:`Limiter` as follows:: from flask.ext.limiter import Limiter, HEADERS limiter = Limiter() limiter.header_mapping = { HEADERS.LIMIT : "X-My-Limit", HEADERS.RESET : "X-My-Reset", HEADERS.REMAINING: "X-My-Remaining" } # or by only partially specifying the overrides limiter.header_mapping[HEADERS.LIMIT] = 'X-My-Limit' Recipes ======= .. _keyfunc-customization: Rate Limit Key Functions ------------------------- You can easily customize your rate limits to be based on any characteristic of the incoming request. Both the :class:`Limiter` constructor and the :meth:`Limiter.limit` decorator accept a keyword argument ``key_func`` that should return a string (or an object that has a string representation). 
Rate limiting a route by current user (using Flask-Login)::

    @route("/test")
    @login_required
    @limiter.limit("1 per day", key_func = lambda : current_user.username)
    def test_route():
        return "42"

Rate limiting all requests by country::

    from flask import request, Flask
    import GeoIP
    gi = GeoIP.open("GeoLiteCity.dat", GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE)

    def get_request_country():
        return gi.record_by_name(request.remote_addr)['region_name']

    app = Flask(__name__)
    limiter = Limiter(app, global_limits=["10/hour"], key_func = get_request_country)

Custom Rate limit exceeded responses
------------------------------------

The default configuration results in an ``abort(429)`` being called every time a rate limit is exceeded for a particular route. The exceeded limit is added to the response and results in a response body that looks something like::

    429 Too Many Requests

    Too Many Requests
    1 per 1 day

If you want to configure the response you can register an error handler for the ``429`` error code in a manner similar to the following example, which returns a json response instead:: @app.errorhandler(429) def ratelimit_handler(e): return make_response( jsonify(error="ratelimit exceeded %s" % e.description) , 429 ) Using Flask Pluggable Views --------------------------- If you are using a class based approach to defining view function, the regular method of decorating a view function to apply a per route rate limit will not work. You can add rate limits to your view classes using the following approach. .. code-block:: python app = Flask(__name__) limiter = Limiter(app, key_func=get_remote_address) class MyView(flask.views.MethodView): decorators = [limiter.limit("10/second")] def get(self): return "get" def put(self): return "put" .. note:: This approach is limited to either sharing the same rate limit for all http methods of a given :class:`flask.views.View` or applying the declared rate limit independently for each http method (to accomplish this, pass in ``True`` to the ``per_method`` keyword argument to :meth:`Limiter.limit`). Alternatively, the limit can be restricted to only certain http methods by passing them as a list to the `methods` keyword argument. The above approach has been tested with sub-classes of :class:`flask.views.View`, :class:`flask.views.MethodView` and :class:`flask.ext.restful.Resource`. Rate limiting all routes in a :class:`flask.Blueprint` ------------------------------------------------------ :meth:`Limiter.limit`, :meth:`Limiter.shared_limit` & :meth:`Limiter.exempt` can all be applied to :class:`flask.Blueprint` instances as well. In the following example the **login** Blueprint has a special rate limit applied to all its routes, while the **help** Blueprint is exempt from all rate limits. The **regular** Blueprint follows the global rate limits. .. code-block:: python app = Flask(__name__) login = Blueprint("login", __name__, url_prefix = "/login") regular = Blueprint("regular", __name__, url_prefix = "/regular") doc = Blueprint("doc", __name__, url_prefix = "/doc") @doc.route("/") def doc_index(): return "doc" @regular.route("/") def regular_index(): return "regular" @login.route("/") def login_index(): return "login" limiter = Limiter(app, global_limits = ["1/second"], key_func=get_remote_address) limiter.limit("60/hour")(login) limiter.exempt(doc) app.register_blueprint(doc) app.register_blueprint(login) app.register_blueprint(regular) .. _logging: Logging ------- Each :class:`Limiter` instance has a ``logger`` instance variable that is by default **not** configured with a handler. You can add your own handler to obtain log messages emitted by :mod:`flask_limiter`. Simple stdout handler:: limiter = Limiter(app, key_func=get_remote_address) limiter.logger.addHandler(StreamHandler()) Reusing all the handlers of the ``logger`` instance of the :class:`flask.Flask` app:: app = Flask(__name__) limiter = Limiter(app, key_func=get_remote_address) for handler in app.logger.handlers: limiter.logger.addHandler(handler) Custom error messages --------------------- :meth:`Limiter.limit` & :meth:`Limiter.shared_limit` can be provided with an `error_message` argument to over ride the default `n per x` error message that is returned to the calling client. The `error_message` argument can either be a simple string or a callable that returns one. .. 
code-block:: python app = Flask(__name__) limiter = Limiter(app, key_func=get_remote_address) def error_handler(): return app.config.get("DEFAULT_ERROR_MESSAGE") @limiter.limit("1/second", error_message='chill!') @app.route("/") def index(): .... @limiter.limit("10/second", error_message=error_handler) @app.route("/ping") def ping(): .... API === Core ---- .. autoclass:: Limiter Exceptions ---------- .. autoexception:: RateLimitExceeded Utils ----- .. automodule:: flask_limiter.util .. include:: ../../HISTORY.rst References ========== * `Redis rate limiting pattern #2 `_ * `DomainTools redis rate limiter `_ * `limits: python rate limiting utilities `_ .. include:: ../../CONTRIBUTIONS.rst flask-limiter-0.9.3/flask_limiter/000077500000000000000000000000001267142337500171255ustar00rootroot00000000000000flask-limiter-0.9.3/flask_limiter/__init__.py000066400000000000000000000003351267142337500212370ustar00rootroot00000000000000""" Flask-Limiter extension for rate limiting """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions from .errors import RateLimitExceeded from .extension import Limiter, HEADERS flask-limiter-0.9.3/flask_limiter/_version.py000066400000000000000000000154401267142337500213270ustar00rootroot00000000000000# pylint: skip-file # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (build by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.8+ (https://github.com/warner/python-versioneer) # these strings will be replaced by git during git-archive git_refnames = "$Format:%d$" git_full = "$Format:%H$" import subprocess import sys def run_command(args, cwd=None, verbose=False, hide_stderr=False): try: # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) except EnvironmentError: e = sys.exc_info()[1] if verbose: print("unable to run %s" % args[0]) print(e) return None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % args[0]) return None return stdout import sys import re import os.path def get_expanded_variables(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # variables. When used from setup.py, we don't want to import # _version.py, so we do it with a regexp instead. This function is not # used from _version.py. 
variables = {} try: f = open(versionfile_abs,"r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["full"] = mo.group(1) f.close() except EnvironmentError: pass return variables def versions_from_expanded_variables(variables, tag_prefix, verbose=False): refnames = variables["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("variables are unexpanded, not using") return {} # unexpanded, so not in an unpacked git-archive tarball refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs-tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return { "version": r, "full": variables["full"].strip() } # no suitable tags, so we use the full revision id if verbose: print("no suitable tags, using full revision id") return { "version": variables["full"].strip(), "full": variables["full"].strip() } def versions_from_vcs(tag_prefix, root, verbose=False): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' variables were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) return {} GIT = "git" if sys.platform == "win32": GIT = "git.cmd" stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: return {} if not stdout.startswith(tag_prefix): if verbose: print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)) return {} tag = stdout[len(tag_prefix):] stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root) if stdout is None: return {} full = stdout.strip() if tag.endswith("-dirty"): full += "-dirty" return {"version": tag, "full": full} def versions_from_parentdir(parentdir_prefix, root, verbose=False): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. 
dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % (root, dirname, parentdir_prefix)) return None return {"version": dirname[len(parentdir_prefix):], "full": ""} tag_prefix = "" parentdir_prefix = "flask_limiter-" versionfile_source = "flask_limiter/_version.py" def get_versions(default={"version": "unknown", "full": ""}, verbose=False): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded variables. variables = { "refnames": git_refnames, "full": git_full } ver = versions_from_expanded_variables(variables, tag_prefix, verbose) if ver: return ver try: root = os.path.abspath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) except NameError: return default return (versions_from_vcs(tag_prefix, root, verbose) or versions_from_parentdir(parentdir_prefix, root, verbose) or default) flask-limiter-0.9.3/flask_limiter/errors.py000066400000000000000000000016311267142337500210140ustar00rootroot00000000000000""" errors and exceptions """ from distutils.version import LooseVersion from pkg_resources import get_distribution from werkzeug import exceptions from six import text_type werkzeug_version = get_distribution("werkzeug").version if LooseVersion(werkzeug_version) < LooseVersion("0.9"): # pragma: no cover # sorry, for touching your internals :). import werkzeug._internal werkzeug._internal.HTTP_STATUS_CODES[429] = 'Too Many Requests' class RateLimitExceeded(exceptions.HTTPException): """ exception raised when a rate limit is hit. The exception results in ``abort(429)`` being called. 
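Note that this class is only defined when the installed Werkzeug version is older than 0.9; newer Werkzeug releases already ship :exc:`werkzeug.exceptions.TooManyRequests`, which is used in its place (see the ``else`` branch below).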
""" code = 429 def __init__(self, limit): self.description = text_type(limit) super(RateLimitExceeded, self).__init__() else: # Werkzeug 0.9 and up have an existing exception for 429 RateLimitExceeded = exceptions.TooManyRequests flask-limiter-0.9.3/flask_limiter/extension.py000066400000000000000000000505001267142337500215130ustar00rootroot00000000000000""" the flask extension """ import warnings from functools import wraps import logging from flask import request, current_app, g, Blueprint from werkzeug.http import http_date from limits.errors import ConfigurationError from limits.storage import storage_from_string, MemoryStorage from limits.strategies import STRATEGIES from limits.util import parse_many import six import sys import time from .errors import RateLimitExceeded from .util import get_ipaddr class C: ENABLED = "RATELIMIT_ENABLED" HEADERS_ENABLED = "RATELIMIT_HEADERS_ENABLED" STORAGE_URL = "RATELIMIT_STORAGE_URL" STORAGE_OPTIONS = "RATELIMIT_STORAGE_OPTIONS" STRATEGY = "RATELIMIT_STRATEGY" GLOBAL_LIMITS = "RATELIMIT_GLOBAL" HEADER_LIMIT = "RATELIMIT_HEADER_LIMIT" HEADER_REMAINING = "RATELIMIT_HEADER_REMAINING" HEADER_RESET = "RATELIMIT_HEADER_RESET" SWALLOW_ERRORS = "RATELIMIT_SWALLOW_ERRORS" IN_MEMORY_FALLBACK = "RATELIMIT_IN_MEMORY_FALLBACK" HEADER_RETRY_AFTER = "RATELIMIT_HEADER_RETRY_AFTER" HEADER_RETRY_AFTER_VALUE = "RATELIMIT_HEADER_RETRY_AFTER_VALUE" class HEADERS: RESET = 1 REMAINING = 2 LIMIT = 3 RETRY_AFTER = 4 MAX_BACKEND_CHECKS = 5 class ExtLimit(object): """ simple wrapper to encapsulate limits and their context """ def __init__(self, limit, key_func, scope, per_method, methods, error_message, exempt_when): self._limit = limit self.key_func = key_func self._scope = scope self.per_method = per_method self.methods = methods and [m.lower() for m in methods] or methods self.error_message = error_message self.exempt_when = exempt_when @property def limit(self): return self._limit() if callable(self._limit) else self._limit @property def scope(self): return self._scope(request.endpoint) if callable(self._scope) else self._scope @property def is_exempt(self): """Check if the limit is exempt.""" return self.exempt_when and self.exempt_when() class Limiter(object): """ :param app: :class:`flask.Flask` instance to initialize the extension with. :param list global_limits: a variable list of strings denoting global limits to apply to all routes. :ref:`ratelimit-string` for more details. :param function key_func: a callable that returns the domain to rate limit by. :param bool headers_enabled: whether ``X-RateLimit`` response headers are written. :param str strategy: the strategy to use. refer to :ref:`ratelimit-strategy` :param str storage_uri: the storage location. refer to :ref:`ratelimit-conf` :param dict storage_options: kwargs to pass to the storage implementation upon instantiation. :param bool auto_check: whether to automatically check the rate limit in the before_request chain of the application. default ``True`` :param bool swallow_errors: whether to swallow errors when hitting a rate limit. An exception will still be logged. default ``False`` :param list in_memory_fallback: a variable list of strings denoting fallback limits to apply when the storage is down. 
""" def __init__(self, app=None , key_func=None , global_limits=[] , headers_enabled=False , strategy=None , storage_uri=None , storage_options={} , auto_check=True , swallow_errors=False , in_memory_fallback=[] , retry_after=None ): self.app = app self.logger = logging.getLogger("flask-limiter") self.enabled = True self._global_limits = [] self._in_memory_fallback = [] self._exempt_routes = set() self._request_filters = [] self._headers_enabled = headers_enabled self._header_mapping = {} self._retry_after = retry_after self._strategy = strategy self._storage_uri = storage_uri self._storage_options = storage_options self._auto_check = auto_check self._swallow_errors = swallow_errors if not key_func: warnings.warn( "Use of the default `get_ipaddr` function is discouraged." " Please refer to https://flask-limiter.readthedocs.org/#rate-limit-domain" " for the recommended configuration", UserWarning ) self._key_func = key_func or get_ipaddr for limit in global_limits: self._global_limits.extend( [ ExtLimit( limit, self._key_func, None, False, None, None, None ) for limit in parse_many(limit) ] ) for limit in in_memory_fallback: self._in_memory_fallback.extend( [ ExtLimit( limit, self._key_func, None, False, None, None, None ) for limit in parse_many(limit) ] ) self._route_limits = {} self._dynamic_route_limits = {} self._blueprint_limits = {} self._blueprint_dynamic_limits = {} self._blueprint_exempt = set() self._storage = self._limiter = None self._storage_dead = False self._fallback_limiter = None self.__check_backend_count = 0 self.__last_check_backend = time.time() class BlackHoleHandler(logging.StreamHandler): def emit(*_): return self.logger.addHandler(BlackHoleHandler()) if app: self.init_app(app) def init_app(self, app): """ :param app: :class:`flask.Flask` instance to rate limit. 
""" self.enabled = app.config.setdefault(C.ENABLED, True) self._swallow_errors = app.config.setdefault( C.SWALLOW_ERRORS, self._swallow_errors ) self._headers_enabled = ( self._headers_enabled or app.config.setdefault(C.HEADERS_ENABLED, False) ) self._storage_options.update( app.config.get(C.STORAGE_OPTIONS, {}) ) self._storage = storage_from_string( self._storage_uri or app.config.setdefault(C.STORAGE_URL, 'memory://'), ** self._storage_options ) strategy = ( self._strategy or app.config.setdefault(C.STRATEGY, 'fixed-window') ) if strategy not in STRATEGIES: raise ConfigurationError("Invalid rate limiting strategy %s" % strategy) self._limiter = STRATEGIES[strategy](self._storage) self._header_mapping.update({ HEADERS.RESET : self._header_mapping.get(HEADERS.RESET,None) or app.config.setdefault(C.HEADER_RESET, "X-RateLimit-Reset"), HEADERS.REMAINING : self._header_mapping.get(HEADERS.REMAINING,None) or app.config.setdefault(C.HEADER_REMAINING, "X-RateLimit-Remaining"), HEADERS.LIMIT : self._header_mapping.get(HEADERS.LIMIT,None) or app.config.setdefault(C.HEADER_LIMIT, "X-RateLimit-Limit"), HEADERS.RETRY_AFTER : self._header_mapping.get(HEADERS.RETRY_AFTER,None) or app.config.setdefault(C.HEADER_RETRY_AFTER, "Retry-After"), }) self._retry_after = ( self._retry_after or app.config.get(C.HEADER_RETRY_AFTER_VALUE) ) conf_limits = app.config.get(C.GLOBAL_LIMITS, None) if not self._global_limits and conf_limits: self._global_limits = [ ExtLimit( limit, self._key_func, None, False, None, None, None ) for limit in parse_many(conf_limits) ] fallback_limits = app.config.get(C.IN_MEMORY_FALLBACK, None) if not self._in_memory_fallback and fallback_limits: self._in_memory_fallback = [ ExtLimit( limit, self._key_func, None, False, None, None, None ) for limit in parse_many(fallback_limits) ] if self._auto_check: app.before_request(self.__check_request_limit) app.after_request(self.__inject_headers) if self._in_memory_fallback: self._fallback_storage = MemoryStorage() self._fallback_limiter = STRATEGIES[strategy](self._fallback_storage) # purely for backward compatibility as stated in flask documentation if not hasattr(app, 'extensions'): app.extensions = {} # pragma: no cover app.extensions['limiter'] = self def __should_check_backend(self): if self.__check_backend_count > MAX_BACKEND_CHECKS: self.__check_backend_count = 0 if time.time() - self.__last_check_backend > pow(2, self.__check_backend_count): self.__last_check_backend = time.time() self.__check_backend_count += 1 return True return False def check(self): """ check the limits for the current request :raises: RateLimitExceeded """ self.__check_request_limit() def reset(self): """ resets the storage if it supports being reset """ try: self._storage.reset() self.logger.info("Storage has be reset and all limits cleared") except NotImplementedError: self.logger.warning("This storage type does not support being reset") @property def limiter(self): if self._storage_dead and self._in_memory_fallback: return self._fallback_limiter else: return self._limiter def __inject_headers(self, response): current_limit = getattr(g, 'view_rate_limit', None) if self.enabled and self._headers_enabled and current_limit: window_stats = self.limiter.get_window_stats(*current_limit) response.headers.add( self._header_mapping[HEADERS.LIMIT], str(current_limit[0].amount) ) response.headers.add( self._header_mapping[HEADERS.REMAINING], window_stats[1] ) response.headers.add( self._header_mapping[HEADERS.RESET], window_stats[0] ) response.headers.add( 
self._header_mapping[HEADERS.RETRY_AFTER], self._retry_after == 'http-date' and http_date(window_stats[0]) or int(window_stats[0] - time.time()) ) return response def __check_request_limit(self): endpoint = request.endpoint or "" view_func = current_app.view_functions.get(endpoint, None) name = ("%s.%s" % ( view_func.__module__, view_func.__name__ ) if view_func else "" ) if (not request.endpoint or not self.enabled or view_func == current_app.send_static_file or name in self._exempt_routes or request.blueprint in self._blueprint_exempt or any(fn() for fn in self._request_filters) ): return limits = ( name in self._route_limits and self._route_limits[name] or [] ) dynamic_limits = [] if name in self._dynamic_route_limits: for lim in self._dynamic_route_limits[name]: try: dynamic_limits.extend( ExtLimit( limit, lim.key_func, lim.scope, lim.per_method, lim.methods, lim.error_message, lim.exempt_when ) for limit in parse_many(lim.limit) ) except ValueError as e: self.logger.error( "failed to load ratelimit for view function %s (%s)" , name, e ) if request.blueprint: if (request.blueprint in self._blueprint_dynamic_limits and not dynamic_limits ): for lim in self._blueprint_dynamic_limits[request.blueprint]: try: dynamic_limits.extend( ExtLimit( limit, lim.key_func, lim.scope, lim.per_method, lim.methods, lim.error_message, lim.exempt_when ) for limit in parse_many(lim.limit) ) except ValueError as e: self.logger.error( "failed to load ratelimit for blueprint %s (%s)" , request.blueprint, e ) if (request.blueprint in self._blueprint_limits and not limits ): limits.extend(self._blueprint_limits[request.blueprint]) failed_limit = None limit_for_header = None try: all_limits = [] if self._storage_dead and self._fallback_limiter: if self.__should_check_backend() and self._storage.check(): self.logger.info( "Rate limit storage recovered" ) self._storage_dead = False self.__check_backend_count = 0 else: all_limits = self._in_memory_fallback if not all_limits: all_limits = (limits + dynamic_limits or self._global_limits) for lim in all_limits: limit_scope = lim.scope or endpoint if lim.is_exempt: return if lim.methods is not None and request.method.lower() not in lim.methods: return if lim.per_method: limit_scope += ":%s" % request.method if not limit_for_header or lim.limit < limit_for_header[0]: limit_for_header = (lim.limit, lim.key_func(), limit_scope) if not self.limiter.hit(lim.limit, lim.key_func(), limit_scope): self.logger.warning( "ratelimit %s (%s) exceeded at endpoint: %s" , lim.limit, lim.key_func(), limit_scope ) failed_limit = lim limit_for_header = (lim.limit, lim.key_func(), limit_scope) break g.view_rate_limit = limit_for_header if failed_limit: if failed_limit.error_message: exc_description = failed_limit.error_message if not callable( failed_limit.error_message ) else failed_limit.error_message() else: exc_description = six.text_type(failed_limit.limit) raise RateLimitExceeded(exc_description) except Exception as e: # no qa if isinstance(e, RateLimitExceeded): six.reraise(*sys.exc_info()) if self._in_memory_fallback and not self._storage_dead: self.logger.warn( "Rate limit storage unreachable - falling back to" " in-memory storage" ) self._storage_dead = True self.__check_request_limit() else: if self._swallow_errors: self.logger.exception( "Failed to rate limit. 
Swallowing error" ) else: six.reraise(*sys.exc_info()) def __limit_decorator(self, limit_value, key_func=None, shared=False, scope=None, per_method=False, methods=None, error_message=None, exempt_when=None): _scope = scope if shared else None def _inner(obj): func = key_func or self._key_func is_route = not isinstance(obj, Blueprint) name = "%s.%s" % (obj.__module__, obj.__name__) if is_route else obj.name dynamic_limit, static_limits = None, [] if callable(limit_value): dynamic_limit = ExtLimit(limit_value, func, _scope, per_method, methods, error_message, exempt_when) else: try: static_limits = [ExtLimit( limit, func, _scope, per_method, methods, error_message, exempt_when ) for limit in parse_many(limit_value)] except ValueError as e: self.logger.error( "failed to configure %s %s (%s)", "view function" if is_route else "blueprint", name, e ) if isinstance(obj, Blueprint): if dynamic_limit: self._blueprint_dynamic_limits.setdefault(name, []).append( dynamic_limit ) else: self._blueprint_limits.setdefault(name, []).extend( static_limits ) else: @wraps(obj) def __inner(*a, **k): return obj(*a, **k) if dynamic_limit: self._dynamic_route_limits.setdefault(name, []).append( dynamic_limit ) else: self._route_limits.setdefault(name, []).extend( static_limits ) return __inner return _inner def limit(self, limit_value, key_func=None, per_method=False, methods=None, error_message=None, exempt_when=None): """ decorator to be used for rate limiting individual routes or blueprints. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param bool per_method: whether the limit is sub categorized into the http method of the request. :param list methods: if specified, only the methods in this list will be rate limited (default: None). :param error_message: string (or callable that returns one) to override the error message used in the response. :return: """ return self.__limit_decorator(limit_value, key_func, per_method=per_method, methods=methods, error_message=error_message, exempt_when=exempt_when) def shared_limit(self, limit_value, scope, key_func=None, error_message=None, exempt_when=None): """ decorator to be applied to multiple routes sharing the same rate limit. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param scope: a string or callable that returns a string for defining the rate limiting scope. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param error_message: string (or callable that returns one) to override the error message used in the response. """ return self.__limit_decorator( limit_value, key_func, True, scope, error_message=error_message, exempt_when=exempt_when ) def exempt(self, obj): """ decorator to mark a view or all views in a blueprint as exempt from rate limits. """ if not isinstance(obj, Blueprint): name = "%s.%s" % (obj.__module__, obj.__name__) @wraps(obj) def __inner(*a, **k): return obj(*a, **k) self._exempt_routes.add(name) return __inner else: self._blueprint_exempt.add(obj.name) def request_filter(self, fn): """ decorator to mark a function as a filter to be executed to check if the request is exempt from rate limiting. 
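The registered callable takes no arguments and should return ``True`` for requests that must bypass all rate limits, for example (an illustrative sketch; the header name is an assumption, not part of the extension)::

    @limiter.request_filter
    def internal_whitelist():
        # skip rate limiting for requests explicitly marked as internal
        return request.headers.get("X-Internal", "") == "true"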
""" self._request_filters.append(fn) return fn flask-limiter-0.9.3/flask_limiter/util.py000066400000000000000000000007571267142337500204650ustar00rootroot00000000000000""" """ from flask import request def get_ipaddr(): """ :return: the ip address for the current request (or 127.0.0.1 if none found) based on the X-Forwarded-For headers. """ if request.access_route: return request.access_route[0] else: return request.remote_addr or '127.0.0.1' def get_remote_address(): """ :return: the ip address for the current request (or 127.0.0.1 if none found) """ return request.remote_addr or '127.0.0.1' flask-limiter-0.9.3/flask_limiter/version.py000066400000000000000000000000561267142337500211650ustar00rootroot00000000000000""" empty file to be updated by versioneer """flask-limiter-0.9.3/push-release.sh000077500000000000000000000002661267142337500172400ustar00rootroot00000000000000#!/bin/bash cur=$(git rev-parse --abbrev-ref HEAD) git checkout master git push origin master --tags git checkout stable git merge master git push origin stable git checkout $cur flask-limiter-0.9.3/requirements/000077500000000000000000000000001267142337500170235ustar00rootroot00000000000000flask-limiter-0.9.3/requirements/ci.txt000066400000000000000000000000251267142337500201540ustar00rootroot00000000000000-r test.txt coverallsflask-limiter-0.9.3/requirements/main.txt000066400000000000000000000000351267142337500205060ustar00rootroot00000000000000limits Flask>=0.8 six>=1.4.1 flask-limiter-0.9.3/requirements/test.txt000066400000000000000000000002221267142337500205370ustar00rootroot00000000000000-r main.txt nose mock coverage nose-cov git+https://github.com/alisaifee/pymemcache.git#egg=pymemcache redis hiro>0.1.6 ordereddict flask-restful flask-limiter-0.9.3/setup.py000077500000000000000000000021561267142337500160210ustar00rootroot00000000000000""" setup.py for Flask-Limiter """ __author__ = "Ali-Akber Saifee" __email__ = "ali@indydevs.org" __copyright__ = "Copyright 2014, Ali-Akber Saifee" from setuptools import setup, find_packages import os this_dir = os.path.abspath(os.path.dirname(__file__)) REQUIREMENTS = filter(None, open( os.path.join(this_dir, 'requirements', 'main.txt')).read().splitlines()) import versioneer versioneer.versionfile_source = "flask_limiter/_version.py" versioneer.versionfile_build = "flask_limiter/version.py" versioneer.tag_prefix = "" versioneer.parentdir_prefix = "flask_limiter-" setup( name='Flask-Limiter', author=__author__, author_email=__email__, license="MIT", url="https://flask-limiter.readthedocs.org", zip_safe=False, version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), install_requires=list(REQUIREMENTS), classifiers=[k for k in open('CLASSIFIERS').read().split('\n') if k], description='Rate limiting for flask applications', long_description=open('README.rst').read() + open('HISTORY.rst').read(), packages=find_packages(exclude=["tests*"]), ) flask-limiter-0.9.3/shippable.yml000066400000000000000000000007241267142337500167750ustar00rootroot00000000000000language: python python: - "2.6" - "2.7" - "3.3" - "3.4" - "pypy" install: - pip install -r requirements/ci.txt --use-mirrors services: - redis-server - memcached script: - mkdir -p shippable/codecoverage/ - mkdir -p shippable/testresults/ - nosetests tests --with-coverage --cover-branches --cover-xml --cover-xml-file=shippable/codecoverage/coverage.xml --with-xunit --xunit-file=shippable/testresults/nosetests.xml -v 
flask-limiter-0.9.3/tag.sh000077500000000000000000000017561267142337500154230ustar00rootroot00000000000000#!/bin/bash rm -rf build echo current version:$(python setup.py --version) read -p "new version:" new_version last_portion=$(grep -E "^Changelog$" HISTORY.rst -5 | grep -E "^\d+.\d+.\d+") changelog_file=/var/tmp/flask-ratelimit.newchangelog new_changelog_heading="${new_version} `date +"%Y-%m-%d"`" new_changelog_heading_sep=$(python -c "print '-'*len('$new_changelog_heading')") echo $new_changelog_heading > $changelog_file echo $new_changelog_heading_sep >> $changelog_file python -c "print open('HISTORY.rst').read().replace('$last_portion', open('$changelog_file').read() +'\n' + '$last_portion')" > HISTORY.rst.new cp HISTORY.rst.new HISTORY.rst vim HISTORY.rst if rst2html.py HISTORY.rst > /dev/null then echo "tagging $new_version" git add HISTORY.rst git commit -m "updating changelog for ${new_version}" git tag -s ${new_version} -m "tagging version ${new_version}" python setup.py build sdist bdist_egg upload else echo changelog has errors. skipping tag. fi; flask-limiter-0.9.3/tests/000077500000000000000000000000001267142337500154425ustar00rootroot00000000000000flask-limiter-0.9.3/tests/__init__.py000066400000000000000000000006621267142337500175570ustar00rootroot00000000000000from functools import wraps import platform from nose.plugins.skip import SkipTest def test_import(): import flask_limiter def test_module_version(): import flask_limiter assert flask_limiter.__version__ is not None def skip_if_pypy(fn): @wraps(fn) def __inner(*a, **k): if platform.python_implementation().lower() == "pypy": raise SkipTest return fn(*a, **k) return __inner flask-limiter-0.9.3/tests/test_deprecation.py000066400000000000000000000005461267142337500213550ustar00rootroot00000000000000""" """ import unittest import warnings class DeprecationTests(unittest.TestCase): def test_insecure_setup(self): with warnings.catch_warnings(record=True) as w: from flask import Flask from flask_limiter import Limiter app = Flask(__name__) Limiter(app) self.assertEqual(len(w), 1) flask-limiter-0.9.3/tests/test_flask_ext.py000066400000000000000000001264701267142337500210450ustar00rootroot00000000000000""" """ import json import logging import time import unittest import hiro import mock import redis import datetime from flask import Flask, Blueprint, request, current_app, make_response from flask.ext import restful from flask.ext.restful import Resource from flask.views import View, MethodView from limits.errors import ConfigurationError from limits.storage import MemcachedStorage from limits.strategies import MovingWindowRateLimiter from flask.ext.limiter.extension import C, Limiter, HEADERS from flask.ext.limiter.util import get_remote_address, get_ipaddr class FlaskExtTests(unittest.TestCase): def setUp(self): redis.Redis().flushall() def build_app(self, config={}, **limiter_args): app = Flask(__name__) for k, v in config.items(): app.config.setdefault(k, v) limiter_args.setdefault('key_func', get_remote_address) limiter = Limiter(app, **limiter_args) mock_handler = mock.Mock() mock_handler.level = logging.INFO limiter.logger.addHandler(mock_handler) return app, limiter def test_invalid_strategy(self): app = Flask(__name__) app.config.setdefault(C.STRATEGY, "fubar") self.assertRaises(ConfigurationError, Limiter, app, key_func=get_remote_address) def test_invalid_storage_string(self): app = Flask(__name__) app.config.setdefault(C.STORAGE_URL, "fubar://localhost:1234") self.assertRaises(ConfigurationError, Limiter, app, 
key_func=get_remote_address) def test_constructor_arguments_over_config(self): app = Flask(__name__) app.config.setdefault(C.STRATEGY, "fixed-window-elastic-expiry") limiter = Limiter(strategy='moving-window', key_func=get_remote_address) limiter.init_app(app) app.config.setdefault(C.STORAGE_URL, "redis://localhost:6379") self.assertEqual(type(limiter._limiter), MovingWindowRateLimiter) limiter = Limiter(storage_uri='memcached://localhost:11211', key_func=get_remote_address) limiter.init_app(app) self.assertEqual(type(limiter._storage), MemcachedStorage) def test_error_message(self): app, limiter = self.build_app({ C.GLOBAL_LIMITS: "1 per day" }) @app.route("/") def null(): return "" with app.test_client() as cli: cli.get("/") self.assertTrue("1 per 1 day" in cli.get("/").data.decode()) @app.errorhandler(429) def ratelimit_handler(e): return make_response('{"error" : "rate limit %s"}' % str(e.description), 429) self.assertEqual({'error': 'rate limit 1 per 1 day'}, json.loads(cli.get("/").data.decode())) def test_reset(self): app, limiter = self.build_app({ C.GLOBAL_LIMITS: "1 per day" }) @app.route("/") def null(): return "Hello Reset" with app.test_client() as cli: cli.get("/") self.assertTrue("1 per 1 day" in cli.get("/").data.decode()) limiter.reset() self.assertEqual("Hello Reset", cli.get("/").data.decode()) self.assertTrue("1 per 1 day" in cli.get("/").data.decode()) def test_swallow_error(self): app, limiter = self.build_app({ C.GLOBAL_LIMITS: "1 per day", C.SWALLOW_ERRORS: True }) @app.route("/") def null(): return "ok" with app.test_client() as cli: with mock.patch("limits.strategies.FixedWindowRateLimiter.hit") as hit: def raiser(*a, **k): raise Exception hit.side_effect = raiser self.assertTrue("ok" in cli.get("/").data.decode()) def test_no_swallow_error(self): app, limiter = self.build_app({ C.GLOBAL_LIMITS: "1 per day", }) @app.route("/") def null(): return "ok" @app.errorhandler(500) def e500(e): return str(e), 500 with app.test_client() as cli: with mock.patch("limits.strategies.FixedWindowRateLimiter.hit") as hit: def raiser(*a, **k): raise Exception("underlying") hit.side_effect = raiser self.assertEqual(500, cli.get("/").status_code) self.assertEqual("underlying", cli.get("/").data.decode()) def test_combined_rate_limits(self): app, limiter = self.build_app({ C.GLOBAL_LIMITS: "1 per hour; 10 per day" }) @app.route("/t1") @limiter.limit("100 per hour;10/minute") def t1(): return "t1" @app.route("/t2") def t2(): return "t2" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(200, cli.get("/t1").status_code) self.assertEqual(200, cli.get("/t2").status_code) self.assertEqual(429, cli.get("/t2").status_code) def test_key_func(self): app, limiter = self.build_app() @app.route("/t1") @limiter.limit("100 per minute", lambda: "test") def t1(): return "test" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: for i in range(0, 100): self.assertEqual(200, cli.get("/t1", headers={"X_FORWARDED_FOR": "127.0.0.2"}).status_code ) self.assertEqual(429, cli.get("/t1").status_code) def test_multiple_decorators(self): app, limiter = self.build_app(key_func=get_ipaddr) @app.route("/t1") @limiter.limit("100 per minute", lambda: "test") # effectively becomes a limit for all users @limiter.limit("50/minute") # per ip as per default key_func def t1(): return "test" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: for i in range(0, 100): self.assertEqual(200 if i < 50 else 429, cli.get("/t1", 
headers={"X_FORWARDED_FOR": "127.0.0.2"}).status_code ) for i in range(50): self.assertEqual(200, cli.get("/t1").status_code) self.assertEqual(429, cli.get("/t1").status_code) self.assertEqual(429, cli.get("/t1", headers={"X_FORWARDED_FOR": "127.0.0.3"}).status_code) def test_logging(self): app = Flask(__name__) limiter = Limiter(app, key_func=get_remote_address) mock_handler = mock.Mock() mock_handler.level = logging.INFO limiter.logger.addHandler(mock_handler) @app.route("/t1") @limiter.limit("1/minute") def t1(): return "test" with app.test_client() as cli: self.assertEqual(200, cli.get("/t1").status_code) self.assertEqual(429, cli.get("/t1").status_code) self.assertEqual(mock_handler.handle.call_count, 1) def test_reuse_logging(self): app = Flask(__name__) app_handler = mock.Mock() app_handler.level = logging.INFO app.logger.addHandler(app_handler) limiter = Limiter(app, key_func=get_remote_address) for handler in app.logger.handlers: limiter.logger.addHandler(handler) @app.route("/t1") @limiter.limit("1/minute") def t1(): return "42" with app.test_client() as cli: cli.get("/t1") cli.get("/t1") self.assertEqual(app_handler.handle.call_count, 1) def test_exempt_routes(self): app, limiter = self.build_app(global_limits=["1/minute"]) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.exempt def t2(): return "test" with app.test_client() as cli: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) def test_blueprint(self): app, limiter = self.build_app(global_limits=["1/minute"]) bp = Blueprint("main", __name__) @bp.route("/t1") def t1(): return "test" @bp.route("/t2") @limiter.limit("10 per minute") def t2(): return "test" app.register_blueprint(bp) with app.test_client() as cli: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) for i in range(0, 10): self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 429) def test_register_blueprint(self): app, limiter = self.build_app(global_limits=["1/minute"]) bp_1 = Blueprint("bp1", __name__) bp_2 = Blueprint("bp2", __name__) bp_3 = Blueprint("bp3", __name__) bp_4 = Blueprint("bp4", __name__) @bp_1.route("/t1") def t1(): return "test" @bp_1.route("/t2") def t2(): return "test" @bp_2.route("/t3") def t3(): return "test" @bp_3.route("/t4") def t4(): return "test" @bp_4.route("/t5") def t4(): return "test" def dy_limit(): return "1/second" app.register_blueprint(bp_1) app.register_blueprint(bp_2) app.register_blueprint(bp_3) app.register_blueprint(bp_4) limiter.limit("1/second")(bp_1) limiter.exempt(bp_3) limiter.limit(dy_limit)(bp_4) with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t3").status_code, 200) for i in range(0, 10): timeline.forward(1) self.assertEqual(cli.get("/t3").status_code, 429) for i in range(0, 10): self.assertEqual(cli.get("/t4").status_code, 200) self.assertEqual(cli.get("/t5").status_code, 200) self.assertEqual(cli.get("/t5").status_code, 429) def test_disabled_flag(self): app, limiter = 
self.build_app( config={C.ENABLED: False}, global_limits=["1/minute"] ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("10 per minute") def t2(): return "test" with app.test_client() as cli: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) for i in range(0, 10): self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) def test_fallback_to_memory_config(self): _, limiter = self.build_app( config={C.ENABLED: True}, global_limits=["5/minute"], storage_uri="redis://localhost:6379", in_memory_fallback=["1/minute"] ) self.assertEqual(len(limiter._in_memory_fallback), 1) _, limiter = self.build_app( config={C.ENABLED: True, C.IN_MEMORY_FALLBACK: "1/minute"}, global_limits=["5/minute"], storage_uri="redis://localhost:6379", ) self.assertEqual(len(limiter._in_memory_fallback), 1) def test_fallback_to_memory_backoff_check(self): app, limiter = self.build_app( config={C.ENABLED: True}, global_limits=["5/minute"], storage_uri="redis://localhost:6379", in_memory_fallback=["1/minute"] ) @app.route("/t1") def t1(): return "test" with app.test_client() as cli: def raiser(*a): raise Exception("redis dead") with hiro.Timeline() as timeline: with mock.patch( "redis.client.Redis.execute_command" ) as exec_command: exec_command.side_effect = raiser self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(2) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(4) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(8) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(16) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(32) self.assertEqual(cli.get("/t1").status_code, 200) # redis back to normal, but exponential backoff will only # result in it being marked after pow(2,0) seconds and next # check self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) def test_fallback_to_memory(self): app, limiter = self.build_app( config={C.ENABLED: True}, global_limits=["5/minute"], storage_uri="redis://localhost:6379", in_memory_fallback=["1/minute"] ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("3 per minute") def t2(): return "test" with app.test_client() as cli: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 429) def raiser(*a): raise Exception("redis dead") with mock.patch( "redis.client.Redis.execute_command" ) as exec_command: exec_command.side_effect = raiser self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) self.assertEqual(cli.get("/t2").status_code, 200) 
self.assertEqual(cli.get("/t2").status_code, 429) # redis back to normal, go back to regular limits with hiro.Timeline() as timeline: timeline.forward(1) limiter._storage.storage.flushall() self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 429) def test_decorated_dynamic_limits(self): app, limiter = self.build_app({"X": "2 per second"}, global_limits=["1/second"]) def request_context_limit(): limits = { "127.0.0.1": "10 per minute", "127.0.0.2": "1 per minute" } remote_addr = (request.access_route and request.access_route[0]) or request.remote_addr or '127.0.0.1' limit = limits.setdefault(remote_addr, '1 per minute') return limit @app.route("/t1") @limiter.limit("20/day") @limiter.limit(lambda: current_app.config.get("X")) @limiter.limit(request_context_limit) def t1(): return "42" @app.route("/t2") @limiter.limit(lambda: current_app.config.get("X")) def t2(): return "42" R1 = {"X_FORWARDED_FOR": "127.0.0.1, 127.0.0.0"} R2 = {"X_FORWARDED_FOR": "127.0.0.2"} with app.test_client() as cli: with hiro.Timeline().freeze() as timeline: for i in range(0, 10): self.assertEqual(cli.get("/t1", headers=R1).status_code, 200) timeline.forward(1) self.assertEqual(cli.get("/t1", headers=R1).status_code, 429) self.assertEqual(cli.get("/t1", headers=R2).status_code, 200) self.assertEqual(cli.get("/t1", headers=R2).status_code, 429) timeline.forward(60) self.assertEqual(cli.get("/t1", headers=R2).status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 200) self.assertEqual(cli.get("/t2").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/t2").status_code, 200) def test_invalid_decorated_dynamic_limits(self): app = Flask(__name__) app.config.setdefault("X", "2 per sec") limiter = Limiter(app, global_limits=["1/second"], key_func=get_remote_address) mock_handler = mock.Mock() mock_handler.level = logging.INFO limiter.logger.addHandler(mock_handler) @app.route("/t1") @limiter.limit(lambda: current_app.config.get("X")) def t1(): return "42" with app.test_client() as cli: with hiro.Timeline().freeze() as timeline: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) # 2 for invalid limit, 1 for warning. 
self.assertEqual(mock_handler.handle.call_count, 3) self.assertTrue("failed to load ratelimit" in mock_handler.handle.call_args_list[0][0][0].msg) self.assertTrue("failed to load ratelimit" in mock_handler.handle.call_args_list[1][0][0].msg) self.assertTrue("exceeded at endpoint" in mock_handler.handle.call_args_list[2][0][0].msg) def test_invalid_decorated_static_limits(self): app = Flask(__name__) limiter = Limiter(app, global_limits=["1/second"], key_func=get_remote_address) mock_handler = mock.Mock() mock_handler.level = logging.INFO limiter.logger.addHandler(mock_handler) @app.route("/t1") @limiter.limit("2/sec") def t1(): return "42" with app.test_client() as cli: with hiro.Timeline().freeze() as timeline: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) self.assertTrue("failed to configure" in mock_handler.handle.call_args_list[0][0][0].msg) self.assertTrue("exceeded at endpoint" in mock_handler.handle.call_args_list[1][0][0].msg) def test_invalid_decorated_static_limit_blueprint(self): app = Flask(__name__) limiter = Limiter(app, global_limits=["1/second"], key_func=get_remote_address) mock_handler = mock.Mock() mock_handler.level = logging.INFO limiter.logger.addHandler(mock_handler) bp = Blueprint("bp1", __name__) @bp.route("/t1") def t1(): return "42" limiter.limit("2/sec")(bp) app.register_blueprint(bp) with app.test_client() as cli: with hiro.Timeline().freeze() as timeline: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) self.assertTrue("failed to configure" in mock_handler.handle.call_args_list[0][0][0].msg) self.assertTrue("exceeded at endpoint" in mock_handler.handle.call_args_list[1][0][0].msg) def test_invalid_decorated_dynamic_limits_blueprint(self): app = Flask(__name__) app.config.setdefault("X", "2 per sec") limiter = Limiter(app, global_limits=["1/second"], key_func=get_remote_address) mock_handler = mock.Mock() mock_handler.level = logging.INFO limiter.logger.addHandler(mock_handler) bp = Blueprint("bp1", __name__) @bp.route("/t1") def t1(): return "42" limiter.limit(lambda: current_app.config.get("X"))(bp) app.register_blueprint(bp) with app.test_client() as cli: with hiro.Timeline().freeze() as timeline: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) self.assertEqual(mock_handler.handle.call_count, 3) self.assertTrue("failed to load ratelimit" in mock_handler.handle.call_args_list[0][0][0].msg) self.assertTrue("failed to load ratelimit" in mock_handler.handle.call_args_list[1][0][0].msg) self.assertTrue("exceeded at endpoint" in mock_handler.handle.call_args_list[2][0][0].msg) def test_multiple_apps(self): app1 = Flask(__name__) app2 = Flask(__name__) limiter = Limiter(global_limits=["1/second"], key_func=get_remote_address) limiter.init_app(app1) limiter.init_app(app2) @app1.route("/ping") def ping(): return "PONG" @app1.route("/slowping") @limiter.limit("1/minute") def slow_ping(): return "PONG" @app2.route("/ping") @limiter.limit("2/second") def ping_2(): return "PONG" @app2.route("/slowping") @limiter.limit("2/minute") def slow_ping_2(): return "PONG" with hiro.Timeline().freeze() as timeline: with app1.test_client() as cli: self.assertEqual(cli.get("/ping").status_code, 200) self.assertEqual(cli.get("/ping").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/ping").status_code, 200) self.assertEqual(cli.get("/slowping").status_code, 200) timeline.forward(59) 
self.assertEqual(cli.get("/slowping").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/slowping").status_code, 200) with app2.test_client() as cli: self.assertEqual(cli.get("/ping").status_code, 200) self.assertEqual(cli.get("/ping").status_code, 200) self.assertEqual(cli.get("/ping").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/ping").status_code, 200) self.assertEqual(cli.get("/slowping").status_code, 200) timeline.forward(59) self.assertEqual(cli.get("/slowping").status_code, 200) self.assertEqual(cli.get("/slowping").status_code, 429) timeline.forward(1) self.assertEqual(cli.get("/slowping").status_code, 200) def test_headers_no_breach(self): app = Flask(__name__) limiter = Limiter( app, global_limits=["10/minute"], headers_enabled=True, key_func=get_remote_address ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("2/second; 5 per minute; 10/hour") def t2(): return "test" with hiro.Timeline().freeze(): with app.test_client() as cli: resp = cli.get("/t1") self.assertEqual( resp.headers.get('X-RateLimit-Limit'), '10' ) self.assertEqual( resp.headers.get('X-RateLimit-Remaining'), '9' ) self.assertEqual( resp.headers.get('X-RateLimit-Reset'), str(int(time.time() + 60)) ) self.assertEqual( resp.headers.get('Retry-After'), str(59) ) resp = cli.get("/t2") self.assertEqual( resp.headers.get('X-RateLimit-Limit'), '2' ) self.assertEqual( resp.headers.get('X-RateLimit-Remaining'), '1' ) self.assertEqual( resp.headers.get('X-RateLimit-Reset'), str(int(time.time() + 1)) ) self.assertEqual( resp.headers.get('Retry-After'), str(0) ) def test_headers_breach(self): app = Flask(__name__) limiter = Limiter( app, global_limits=["10/minute"], headers_enabled=True, key_func=get_remote_address ) @app.route("/t1") @limiter.limit("2/second; 10 per minute; 20/hour") def t(): return "test" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: for i in range(11): resp = cli.get("/t1") timeline.forward(1) self.assertEqual( resp.headers.get('X-RateLimit-Limit'), '10' ) self.assertEqual( resp.headers.get('X-RateLimit-Remaining'), '0' ) self.assertEqual( resp.headers.get('X-RateLimit-Reset'), str(int(time.time() + 49)) ) self.assertEqual( resp.headers.get('Retry-After'), str(int(49)) ) def test_custom_headers_from_setter(self): app = Flask(__name__) limiter = Limiter( app, global_limits=["10/minute"], headers_enabled=True, key_func=get_remote_address, retry_after='http-date' ) limiter._header_mapping[HEADERS.RESET] = 'X-Reset' limiter._header_mapping[HEADERS.LIMIT] = 'X-Limit' limiter._header_mapping[HEADERS.REMAINING] = 'X-Remaining' @app.route("/t1") @limiter.limit("2/second; 10 per minute; 20/hour") def t(): return "test" with hiro.Timeline().freeze(0) as timeline: with app.test_client() as cli: for i in range(11): resp = cli.get("/t1") timeline.forward(1) self.assertEqual( resp.headers.get('X-Limit'), '10' ) self.assertEqual( resp.headers.get('X-Remaining'), '0' ) self.assertEqual( resp.headers.get('X-Reset'), str(int(time.time() + 49)) ) self.assertEqual( resp.headers.get('Retry-After'), 'Thu, 01 Jan 1970 00:01:00 GMT' ) def test_custom_headers_from_config(self): app = Flask(__name__) app.config.setdefault(C.HEADER_LIMIT, "X-Limit") app.config.setdefault(C.HEADER_REMAINING, "X-Remaining") app.config.setdefault(C.HEADER_RESET, "X-Reset") limiter = Limiter( app, global_limits=["10/minute"], headers_enabled=True, key_func=get_remote_address ) @app.route("/t1") @limiter.limit("2/second; 10 per minute; 20/hour") def t(): return "test" 
with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: for i in range(11): resp = cli.get("/t1") timeline.forward(1) self.assertEqual( resp.headers.get('X-Limit'), '10' ) self.assertEqual( resp.headers.get('X-Remaining'), '0' ) self.assertEqual( resp.headers.get('X-Reset'), str(int(time.time() + 49)) ) def test_named_shared_limit(self): app, limiter = self.build_app() shared_limit_a = limiter.shared_limit("1/minute", scope='a') shared_limit_b = limiter.shared_limit("1/minute", scope='b') @app.route("/t1") @shared_limit_a def route1(): return "route1" @app.route("/t2") @shared_limit_a def route2(): return "route2" @app.route("/t3") @shared_limit_b def route3(): return "route3" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(200, cli.get("/t1").status_code) self.assertEqual(200, cli.get("/t3").status_code) self.assertEqual(429, cli.get("/t2").status_code) def test_dynamic_shared_limit(self): app, limiter = self.build_app() fn_a = mock.Mock() fn_b = mock.Mock() fn_a.return_value = "foo" fn_b.return_value = "bar" dy_limit_a = limiter.shared_limit("1/minute", scope=fn_a) dy_limit_b = limiter.shared_limit("1/minute", scope=fn_b) @app.route("/t1") @dy_limit_a def t1(): return "route1" @app.route("/t2") @dy_limit_a def t2(): return "route2" @app.route("/t3") @dy_limit_b def t3(): return "route3" with hiro.Timeline().freeze(): with app.test_client() as cli: self.assertEqual(200, cli.get("/t1").status_code) self.assertEqual(200, cli.get("/t3").status_code) self.assertEqual(429, cli.get("/t2").status_code) self.assertEqual(429, cli.get("/t3").status_code) self.assertEqual(2, fn_a.call_count) self.assertEqual(2, fn_b.call_count) fn_b.assert_called_with("t3") fn_a.assert_has_calls([mock.call("t1"), mock.call("t2")]) def test_conditional_limits(self): """Test that the conditional activation of the limits work.""" app = Flask(__name__) limiter = Limiter(app, key_func=get_remote_address) @app.route("/limited") @limiter.limit("1 per day") def limited_route(): return "passed" @app.route("/unlimited") @limiter.limit("1 per day", exempt_when=lambda: True) def never_limited_route(): return "should always pass" is_exempt = False @app.route("/conditional") @limiter.limit("1 per day", exempt_when=lambda: is_exempt) def conditionally_limited_route(): return "conditional" with app.test_client() as cli: self.assertEqual(cli.get("/limited").status_code, 200) self.assertEqual(cli.get("/limited").status_code, 429) self.assertEqual(cli.get("/unlimited").status_code, 200) self.assertEqual(cli.get("/unlimited").status_code, 200) self.assertEqual(cli.get("/conditional").status_code, 200) self.assertEqual(cli.get("/conditional").status_code, 429) is_exempt = True self.assertEqual(cli.get("/conditional").status_code, 200) is_exempt = False self.assertEqual(cli.get("/conditional").status_code, 429) def test_conditional_shared_limits(self): """Test that conditional shared limits work.""" app = Flask(__name__) limiter = Limiter(app, key_func=get_remote_address) @app.route("/limited") @limiter.shared_limit("1 per day", "test_scope") def limited_route(): return "passed" @app.route("/unlimited") @limiter.shared_limit("1 per day", "test_scope", exempt_when=lambda: True) def never_limited_route(): return "should always pass" is_exempt = False @app.route("/conditional") @limiter.shared_limit("1 per day", "test_scope", exempt_when=lambda: is_exempt) def conditionally_limited_route(): return "conditional" with app.test_client() as cli: 
self.assertEqual(cli.get("/unlimited").status_code, 200) self.assertEqual(cli.get("/unlimited").status_code, 200) self.assertEqual(cli.get("/limited").status_code, 200) self.assertEqual(cli.get("/limited").status_code, 429) self.assertEqual(cli.get("/conditional").status_code, 429) is_exempt = True self.assertEqual(cli.get("/conditional").status_code, 200) is_exempt = False self.assertEqual(cli.get("/conditional").status_code, 429) def test_whitelisting(self): app = Flask(__name__) limiter = Limiter( app, global_limits=["1/minute"], headers_enabled=True, key_func=get_remote_address ) @app.route("/") def t(): return "test" @limiter.request_filter def w(): if request.headers.get("internal", None) == "true": return True return False with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(cli.get("/").status_code, 200) self.assertEqual(cli.get("/").status_code, 429) timeline.forward(60) self.assertEqual(cli.get("/").status_code, 200) for i in range(0, 10): self.assertEqual( cli.get("/", headers={"internal": "true"}).status_code, 200 ) def test_pluggable_views(self): app, limiter = self.build_app( global_limits=["1/hour"] ) class Va(View): methods = ['GET', 'POST'] decorators = [limiter.limit("2/second")] def dispatch_request(self): return request.method.lower() class Vb(View): methods = ['GET'] decorators = [limiter.limit("1/second, 3/minute")] def dispatch_request(self): return request.method.lower() class Vc(View): methods = ['GET'] def dispatch_request(self): return request.method.lower() app.add_url_rule("/a", view_func=Va.as_view("a")) app.add_url_rule("/b", view_func=Vb.as_view("b")) app.add_url_rule("/c", view_func=Vc.as_view("c")) with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(200, cli.get("/a").status_code) self.assertEqual(200, cli.get("/a").status_code) self.assertEqual(429, cli.post("/a").status_code) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(429, cli.get("/b").status_code) self.assertEqual(200, cli.get("/c").status_code) self.assertEqual(429, cli.get("/c").status_code) def test_pluggable_method_views(self): app, limiter = self.build_app( global_limits=["1/hour"] ) class Va(MethodView): decorators = [limiter.limit("2/second")] def get(self): return request.method.lower() def post(self): return request.method.lower() class Vb(MethodView): decorators = [limiter.limit("1/second, 3/minute")] def get(self): return request.method.lower() class Vc(MethodView): def get(self): return request.method.lower() class Vd(MethodView): decorators = [limiter.limit("1/minute", methods=['get'])] def get(self): return request.method.lower() def post(self): return request.method.lower() app.add_url_rule("/a", view_func=Va.as_view("a")) app.add_url_rule("/b", view_func=Vb.as_view("b")) app.add_url_rule("/c", view_func=Vc.as_view("c")) app.add_url_rule("/d", view_func=Vd.as_view("d")) with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(200, cli.get("/a").status_code) self.assertEqual(200, cli.get("/a").status_code) self.assertEqual(429, cli.get("/a").status_code) self.assertEqual(429, cli.post("/a").status_code) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(200, cli.get("/b").status_code) 
timeline.forward(1) self.assertEqual(429, cli.get("/b").status_code) self.assertEqual(200, cli.get("/c").status_code) self.assertEqual(429, cli.get("/c").status_code) self.assertEqual(200, cli.get("/d").status_code) self.assertEqual(429, cli.get("/d").status_code) self.assertEqual(200, cli.post("/d").status_code) self.assertEqual(200, cli.post("/d").status_code) def test_flask_restful_resource(self): app, limiter = self.build_app( global_limits=["1/hour"] ) api = restful.Api(app) class Va(Resource): decorators = [limiter.limit("2/second")] def get(self): return request.method.lower() def post(self): return request.method.lower() class Vb(Resource): decorators = [limiter.limit("1/second, 3/minute")] def get(self): return request.method.lower() class Vc(Resource): def get(self): return request.method.lower() api.add_resource(Va, "/a") api.add_resource(Vb, "/b") api.add_resource(Vc, "/c") with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(200, cli.get("/a").status_code) self.assertEqual(200, cli.get("/a").status_code) self.assertEqual(429, cli.get("/a").status_code) self.assertEqual(429, cli.post("/a").status_code) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(200, cli.get("/b").status_code) timeline.forward(1) self.assertEqual(429, cli.get("/b").status_code) self.assertEqual(200, cli.get("/c").status_code) self.assertEqual(429, cli.get("/c").status_code) def test_separate_method_limits(self): app, limiter = self.build_app() @limiter.limit("1/second", per_method=True) @app.route("/", methods=["GET", "POST"]) def root(): return "root" with hiro.Timeline(): with app.test_client() as cli: self.assertEqual(200, cli.get("/").status_code) self.assertEqual(429, cli.get("/").status_code) self.assertEqual(200, cli.post("/").status_code) self.assertEqual(429, cli.post("/").status_code) def test_explicit_method_limits(self): app, limiter = self.build_app() @limiter.limit("1/second", methods=["GET"]) @app.route("/", methods=["GET", "POST"]) def root(): return "root" with hiro.Timeline(): with app.test_client() as cli: self.assertEqual(200, cli.get("/").status_code) self.assertEqual(429, cli.get("/").status_code) self.assertEqual(200, cli.post("/").status_code) self.assertEqual(200, cli.post("/").status_code) def test_no_auto_check(self): app, limiter = self.build_app(auto_check=False) @limiter.limit("1/second", per_method=True) @app.route("/", methods=["GET", "POST"]) def root(): return "root" with hiro.Timeline().freeze(): with app.test_client() as cli: self.assertEqual(200, cli.get("/").status_code) self.assertEqual(200, cli.get("/").status_code) # attach before_request to perform check @app.before_request def _(): limiter.check() with hiro.Timeline().freeze(): with app.test_client() as cli: self.assertEqual(200, cli.get("/").status_code) self.assertEqual(429, cli.get("/").status_code) def test_custom_error_message(self): app, limiter = self.build_app() @app.errorhandler(429) def ratelimit_handler(e): return make_response( e.description , 429 ) l1 = lambda: "1/second" e1 = lambda: "dos" @limiter.limit("1/second", error_message="uno") @app.route("/t1") def t1(): return "1" @limiter.limit(l1, error_message=e1) @app.route("/t2") def t2(): return "2" s1 = limiter.shared_limit("1/second", scope='error_message', error_message="tres") @app.route("/t3") @s1 def t3(): return "3" with hiro.Timeline().freeze(): with app.test_client() as cli: cli.get("/t1") resp = 
cli.get("/t1") self.assertEqual(429, resp.status_code) self.assertEqual(resp.data, b'uno') cli.get("/t2") resp = cli.get("/t2") self.assertEqual(429, resp.status_code) self.assertEqual(resp.data, b'dos') cli.get("/t3") resp = cli.get("/t3") self.assertEqual(429, resp.status_code) self.assertEqual(resp.data, b'tres') flask-limiter-0.9.3/tests/test_regressions.py000066400000000000000000000050221267142337500214150ustar00rootroot00000000000000""" """ import time import logging import unittest from flask import Flask import hiro import mock import redis from flask.ext.limiter.extension import C, Limiter from flask.ext.limiter.util import get_ipaddr class RegressionTests(unittest.TestCase): def setUp(self): redis.Redis().flushall() def build_app(self, config={}, **limiter_args): app = Flask(__name__) for k, v in config.items(): app.config.setdefault(k, v) limiter = Limiter(app, key_func=get_ipaddr, **limiter_args) mock_handler = mock.Mock() mock_handler.level = logging.INFO limiter.logger.addHandler(mock_handler) return app, limiter def test_redis_request_slower_than_fixed_window(self): app, limiter = self.build_app({ C.GLOBAL_LIMITS: "5 per second", C.STORAGE_URL: "redis://localhost:6379", C.STRATEGY: "fixed-window", C.HEADERS_ENABLED: True }) @app.route("/t1") def t1(): time.sleep(1.1) return "t1" with app.test_client() as cli: resp = cli.get("/t1") self.assertEqual( resp.headers["X-RateLimit-Remaining"], '5' ) def test_redis_request_slower_than_moving_window(self): app, limiter = self.build_app({ C.GLOBAL_LIMITS: "5 per second", C.STORAGE_URL: "redis://localhost:6379", C.STRATEGY: "moving-window", C.HEADERS_ENABLED: True }) @app.route("/t1") def t1(): time.sleep(1.1) return "t1" with app.test_client() as cli: resp = cli.get("/t1") self.assertEqual( resp.headers["X-RateLimit-Remaining"], '5' ) def test_dynamic_limits(self): app, limiter = self.build_app({ C.STRATEGY: "moving-window", C.HEADERS_ENABLED: True }) def func(*a): return "1/second; 2/minute" @app.route("/t1") @limiter.limit(func) def t1(): return "t1" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) timeline.forward(2) self.assertEqual(cli.get("/t1").status_code, 200) self.assertEqual(cli.get("/t1").status_code, 429) flask-limiter-0.9.3/versioneer.py000066400000000000000000000573571267142337500170540ustar00rootroot00000000000000#! /usr/bin/python """versioneer.py (like a rocketeer, but for versions) * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Version: 0.8+ This file helps distutils-based projects manage their version number by just creating version-control tags. For developers who work from a VCS-generated tree (e.g. 'git clone' etc), each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a version number by asking your version-control tool about the current checkout. The version number will be written into a generated _version.py file of your choosing, where it can be included by your __init__.py For users who work from a VCS-generated tarball (e.g. 'git archive'), it will compute a version number by looking at the name of the directory created when te tarball is unpacked. This conventionally includes both the name of the project and a version number. For users who work from a tarball built by 'setup.py sdist', it will get a version number from a previously-generated _version.py file. 
As a result, loading code directly from the source tree will not result in a real version. If you want real versions from VCS trees (where you frequently update from the upstream repository, or do new development), you will need to do a 'setup.py version' after each update, and load code from the build/ directory. You need to provide this code with a few configuration values: versionfile_source: A project-relative pathname into which the generated version strings should be written. This is usually a _version.py next to your project's main __init__.py file. If your project uses src/myproject/__init__.py, this should be 'src/myproject/_version.py'. This file should be checked in to your VCS as usual: the copy created below by 'setup.py update_files' will include code that parses expanded VCS keywords in generated tarballs. The 'build' and 'sdist' commands will replace it with a copy that has just the calculated version string. versionfile_build: Like versionfile_source, but relative to the build directory instead of the source directory. These will differ when your setup.py uses 'package_dir='. If you have package_dir={'myproject': 'src/myproject'}, then you will probably have versionfile_build='myproject/_version.py' and versionfile_source='src/myproject/_version.py'. tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all VCS tags. If your tags look like 'myproject-1.2.0', then you should use tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this should be an empty string. parentdir_prefix: a string, frequently the same as tag_prefix, which appears at the start of all unpacked tarball filenames. If your tarball unpacks into 'myproject-1.2.0', this should be 'myproject-'. To use it: 1: include this file in the top level of your project 2: make the following changes to the top of your setup.py: import versioneer versioneer.versionfile_source = 'src/myproject/_version.py' versioneer.versionfile_build = 'myproject/_version.py' versioneer.tag_prefix = '' # tags are like 1.2.0 versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0' 3: add the following arguments to the setup() call in your setup.py: version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), 4: run 'setup.py update_files', which will create _version.py, and will modify your __init__.py to define __version__ (by calling a function from _version.py) 5: modify your MANIFEST.in to include versioneer.py 6: add both versioneer.py and the generated _version.py to your VCS """ import os, sys, re from setuptools import Command from setuptools.command.sdist import sdist as _sdist from distutils.command.build import build as _build versionfile_source = None versionfile_build = None tag_prefix = None parentdir_prefix = None VCS = "git" LONG_VERSION_PY = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (build by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. 
Generated by # versioneer-0.8+ (https://github.com/warner/python-versioneer) # these strings will be replaced by git during git-archive git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" import subprocess import sys def run_command(args, cwd=None, verbose=False, hide_stderr=False): try: # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) except EnvironmentError: e = sys.exc_info()[1] if verbose: print("unable to run %%s" %% args[0]) print(e) return None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% args[0]) return None return stdout import sys import re import os.path def get_expanded_variables(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # variables. When used from setup.py, we don't want to import # _version.py, so we do it with a regexp instead. This function is not # used from _version.py. variables = {} try: f = open(versionfile_abs,"r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["full"] = mo.group(1) f.close() except EnvironmentError: pass return variables def versions_from_expanded_variables(variables, tag_prefix, verbose=False): refnames = variables["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("variables are unexpanded, not using") return {} # unexpanded, so not in an unpacked git-archive tarball refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs-tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return { "version": r, "full": variables["full"].strip() } # no suitable tags, so we use the full revision id if verbose: print("no suitable tags, using full revision id") return { "version": variables["full"].strip(), "full": variables["full"].strip() } def versions_from_vcs(tag_prefix, root, verbose=False): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' variables were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. 
if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %%s" %% root) return {} GIT = "git" if sys.platform == "win32": GIT = "git.cmd" stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: return {} if not stdout.startswith(tag_prefix): if verbose: print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix)) return {} tag = stdout[len(tag_prefix):] stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root) if stdout is None: return {} full = stdout.strip() if tag.endswith("-dirty"): full += "-dirty" return {"version": tag, "full": full} def versions_from_parentdir(parentdir_prefix, root, verbose=False): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %% (root, dirname, parentdir_prefix)) return None return {"version": dirname[len(parentdir_prefix):], "full": ""} tag_prefix = "%(TAG_PREFIX)s" parentdir_prefix = "%(PARENTDIR_PREFIX)s" versionfile_source = "%(VERSIONFILE_SOURCE)s" def get_versions(default={"version": "unknown", "full": ""}, verbose=False): # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded variables. variables = { "refnames": git_refnames, "full": git_full } ver = versions_from_expanded_variables(variables, tag_prefix, verbose) if ver: return ver try: root = os.path.abspath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) except NameError: return default return (versions_from_vcs(tag_prefix, root, verbose) or versions_from_parentdir(parentdir_prefix, root, verbose) or default) ''' import subprocess import sys def run_command(args, cwd=None, verbose=False, hide_stderr=False): try: # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) except EnvironmentError: e = sys.exc_info()[1] if verbose: print("unable to run %s" % args[0]) print(e) return None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % args[0]) return None return stdout import sys import re import os.path def get_expanded_variables(versionfile_abs): # the code embedded in _version.py can just fetch the value of these # variables. When used from setup.py, we don't want to import # _version.py, so we do it with a regexp instead. This function is not # used from _version.py. 
variables = {} try: f = open(versionfile_abs,"r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["full"] = mo.group(1) f.close() except EnvironmentError: pass return variables def versions_from_expanded_variables(variables, tag_prefix, verbose=False): refnames = variables["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("variables are unexpanded, not using") return {} # unexpanded, so not in an unpacked git-archive tarball refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs-tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return { "version": r, "full": variables["full"].strip() } # no suitable tags, so we use the full revision id if verbose: print("no suitable tags, using full revision id") return { "version": variables["full"].strip(), "full": variables["full"].strip() } def versions_from_vcs(tag_prefix, root, verbose=False): # this runs 'git' from the root of the source tree. This only gets called # if the git-archive 'subst' variables were *not* expanded, and # _version.py hasn't already been rewritten with a short version string, # meaning we're inside a checked out source tree. if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) return {} GIT = "git" if sys.platform == "win32": GIT = "git.cmd" stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: return {} if not stdout.startswith(tag_prefix): if verbose: print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)) return {} tag = stdout[len(tag_prefix):] stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root) if stdout is None: return {} full = stdout.strip() if tag.endswith("-dirty"): full += "-dirty" return {"version": tag, "full": full} def versions_from_parentdir(parentdir_prefix, root, verbose=False): # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % (root, dirname, parentdir_prefix)) return None return {"version": dirname[len(parentdir_prefix):], "full": ""} import os.path import sys # os.path.relpath only appeared in Python-2.6 . Define it here for 2.5. 
def os_path_relpath(path, start=os.path.curdir): """Return a relative version of a path""" if not path: raise ValueError("no path specified") start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x] path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x] # Work out how much of the filepath is shared by start and path. i = len(os.path.commonprefix([start_list, path_list])) rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return os.path.curdir return os.path.join(*rel_list) def do_vcs_install(versionfile_source, ipy): GIT = "git" if sys.platform == "win32": GIT = "git.cmd" files = [versionfile_source, ipy] try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os_path_relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command([GIT, "add", "--"] + files) SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.8+) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. version_version = '%(version)s' version_full = '%(full)s' def get_versions(default={}, verbose=False): return {'version': version_version, 'full': version_full} """ DEFAULT = {"version": "unknown", "full": "unknown"} def versions_from_file(filename): versions = {} try: f = open(filename) except EnvironmentError: return versions for line in f.readlines(): mo = re.match("version_version = '([^']+)'", line) if mo: versions["version"] = mo.group(1) mo = re.match("version_full = '([^']+)'", line) if mo: versions["full"] = mo.group(1) f.close() return versions def write_to_version_file(filename, versions): f = open(filename, "w") f.write(SHORT_VERSION_PY % versions) f.close() print("set %s to '%s'" % (filename, versions["version"])) def get_versions(default=DEFAULT, verbose=False): # returns dict with two keys: 'version' and 'full' assert versionfile_source is not None, "please set versioneer.versionfile_source" assert tag_prefix is not None, "please set versioneer.tag_prefix" assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix" # I am in versioneer.py, which must live at the top of the source tree, # which we use to compute the root directory. py2exe/bbfreeze/non-CPython # don't have __file__, in which case we fall back to sys.argv[0] (which # ought to be the setup.py script). We prefer __file__ since that's more # robust in cases where setup.py was invoked in some weird way (e.g. pip) try: root = os.path.dirname(os.path.abspath(__file__)) except NameError: root = os.path.dirname(os.path.abspath(sys.argv[0])) versionfile_abs = os.path.join(root, versionfile_source) # extract version from first of _version.py, 'git describe', parentdir. # This is meant to work for developers using a source checkout, for users # of a tarball created by 'setup.py sdist', and for users of a # tarball/zipball created by 'git archive' or github's download-from-tag # feature. 
variables = get_expanded_variables(versionfile_abs) if variables: ver = versions_from_expanded_variables(variables, tag_prefix) if ver: if verbose: print("got version from expanded variable %s" % ver) return ver ver = versions_from_file(versionfile_abs) if ver: if verbose: print("got version from file %s %s" % (versionfile_abs,ver)) return ver ver = versions_from_vcs(tag_prefix, root, verbose) if ver: if verbose: print("got version from git %s" % ver) return ver ver = versions_from_parentdir(parentdir_prefix, root, verbose) if ver: if verbose: print("got version from parentdir %s" % ver) return ver if verbose: print("got version from default %s" % ver) return default def get_version(verbose=False): return get_versions(verbose=verbose)["version"] class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): ver = get_version(verbose=True) print("Version is currently: %s" % ver) class cmd_build(_build): def run(self): versions = get_versions(verbose=True) _build.run(self) # now locate _version.py in the new build/ directory and replace it # with an updated value target_versionfile = os.path.join(self.build_lib, versionfile_build) print("UPDATING %s" % target_versionfile) os.unlink(target_versionfile) f = open(target_versionfile, "w") f.write(SHORT_VERSION_PY % versions) f.close() class cmd_sdist(_sdist): def run(self): versions = get_versions(verbose=True) self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory (remembering # that it may be a hardlink) and replace it with an updated value target_versionfile = os.path.join(base_dir, versionfile_source) print("UPDATING %s" % target_versionfile) os.unlink(target_versionfile) f = open(target_versionfile, "w") f.write(SHORT_VERSION_PY % self._versioneer_generated_versions) f.close() INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ class cmd_update_files(Command): description = "modify __init__.py and create _version.py" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py") print(" creating %s" % versionfile_source) f = open(versionfile_source, "w") f.write(LONG_VERSION_PY % {"DOLLAR": "$", "TAG_PREFIX": tag_prefix, "PARENTDIR_PREFIX": parentdir_prefix, "VERSIONFILE_SOURCE": versionfile_source, }) f.close() try: old = open(ipy, "r").read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) f = open(ipy, "a") f.write(INIT_PY_SNIPPET) f.close() else: print(" %s unmodified" % ipy) do_vcs_install(versionfile_source, ipy) def get_cmdclass(): return {'version': cmd_version, 'update_files': cmd_update_files, 'build': cmd_build, 'sdist': cmd_sdist, }