pax_global_header00006660000000000000000000000064137470504050014517gustar00rootroot0000000000000052 comment=2047e7ae3c825bf52dad10cc8402d09e11091bc1 tornado-6.1.0/000077500000000000000000000000001374705040500131715ustar00rootroot00000000000000tornado-6.1.0/.coveragerc000066400000000000000000000007311374705040500153130ustar00rootroot00000000000000# Test coverage configuration. # Usage: # pip install coverage # coverage erase # clears previous data if any # coverage run -m tornado.test.runtests # coverage report # prints to stdout # coverage html # creates ./htmlcov/*.html including annotated source [run] branch = true source = tornado omit = tornado/platform/* tornado/test/* */_auto2to3* [report] # Ignore missing source files, i.e. fake template-generated "files" ignore_errors = true tornado-6.1.0/.flake8000066400000000000000000000007041374705040500143450ustar00rootroot00000000000000[flake8] exclude = .git,.tox,__pycache__,.eggs,build max-line-length = 100 ignore = # E265 block comment should start with '# ' E265, # E266 too many leading '#' for block comment E266, # E402 module level import not at top of file E402, # E722 do not use bare except E722, # flake8 and black disagree about # W503 line break before binary operator # E203 whitespace before ':' W503,E203 doctests = true tornado-6.1.0/.gitattributes000066400000000000000000000002731374705040500160660ustar00rootroot00000000000000# Tests of static file handling assume unix-style line endings. tornado/test/static/*.txt text eol=lf tornado/test/static/dir/*.html text eol=lf tornado/test/templates/*.html text eol=lf tornado-6.1.0/.gitignore000066400000000000000000000002351374705040500151610ustar00rootroot00000000000000*.pyc *.pyo *.so *.class *~ build/ /dist/ MANIFEST /tornado.egg-info/ .tox/ .vagrant /.coverage /htmlcov/ /env/ # Used in demo apps secrets.cfg .mypy_cache/ tornado-6.1.0/.travis.yml000066400000000000000000000075761374705040500153210ustar00rootroot00000000000000# https://travis-ci.org/tornadoweb/tornado os: linux dist: xenial language: python addons: apt: packages: - libgnutls-dev env: global: - CIBW_BUILD="cp3[56789]*" - CIBW_TEST_COMMAND="python3 -m tornado.test" - CIBW_TEST_COMMAND_WINDOWS="python -m tornado.test --fail-if-logs=false" # Before starting the full build matrix, run one test configuration # and the linter (the `black` linter is especially likely to catch # first-time contributors). stages: - quick - test jobs: fast_finish: true include: # We have two and a half types of builds: Wheel builds run on all supported # platforms and run the basic test suite for all supported python versions. # Sdist builds (the "and a half") just build an sdist and run some basic # validation. Both of these upload their artifacts to pypi if there is a # tag on the build and the key is available. # # Tox builds run a more comprehensive test suite with more configurations # and dependencies (getting all these dependencies installed for wheel # builds is a pain, and slows things down because we don't use as much # parallelism there. We could parallelize wheel builds more but we're also # amortizing download costs across the different builds). The wheel builds # take longer, so we run them before the tox builds for better bin packing # in our allotted concurrency. 
- python: '3.8' arch: amd64 services: docker env: BUILD_WHEEL=1 - python: '3.8' arch: arm64 services: docker env: BUILD_WHEEL=1 ASYNC_TEST_TIMEOUT=15 - os: windows env: PATH=/c/Python38:/c/Python38/Scripts:$PATH BUILD_WHEEL=1 language: shell before_install: - choco install python --version 3.8.0 # Windows build images have outdated root certificates; until that's # fixed use certifi instead. # https://github.com/joerick/cibuildwheel/issues/452 - python -m pip install certifi - export SSL_CERT_FILE=`python -c "import certifi; print(certifi.where())"` - os: osx env: BUILD_WHEEL=1 language: shell - python: '3.8' arch: amd64 env: BUILD_SDIST=1 # 3.5.2 is interesting because it's the version in ubuntu 16.04, and due to python's # "provisional feature" rules there are significant differences between patch # versions for asyncio and typing. - python: 3.5.2 # Twisted doesn't install on python 3.5.2, so don't run the "full" tests. env: TOX_ENV=py35 stage: test - python: '3.5' env: TOX_ENV=py35-full - python: '3.6' env: TOX_ENV=py36-full - python: '3.7' env: TOX_ENV=py37-full - python: '3.8' env: TOX_ENV=py38-full - python: '3.9-dev' env: TOX_ENV=py39-full - python: nightly env: TOX_ENV=py3 - python: pypy3.6-7.3.1 # Pypy is a lot slower due to jit warmup costs, so don't run the "full" # test config there. env: TOX_ENV=pypy3 # Docs and lint python versions must be synced with those in tox.ini - python: '3.8' env: TOX_ENV=docs # the quick stage runs first, but putting this at the end lets us take # advantage of travis-ci's defaults and not repeat stage:test in the others. - python: '3.8' env: TOX_ENV=py38,lint stage: quick install: - if [[ -n "$TOX_ENV" ]]; then pip3 install tox; fi - if [[ -n "$BUILD_WHEEL" ]]; then pip3 install cibuildwheel; fi - if [[ -n "$BUILD_WHEEL" || -n "$BUILD_SDIST" ]]; then pip3 install twine; fi script: - if [[ -n "$TOX_ENV" ]]; then tox -e $TOX_ENV -- $TOX_ARGS; fi - if [[ -n "$BUILD_WHEEL" ]]; then cibuildwheel --output-dir dist && ls -l dist; fi - if [[ -n "$BUILD_SDIST" ]]; then python setup.py check sdist && ls -l dist; fi after_success: - if [[ ( -n "$BUILD_WHEEL" || -n "$BUILD_SDIST" ) && -n "$TRAVIS_TAG" && -n "$TWINE_PASSWORD" ]]; then twine upload -u __token__ dist/*; fi tornado-6.1.0/LICENSE000066400000000000000000000261361374705040500142060ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. tornado-6.1.0/MANIFEST.in000066400000000000000000000014771374705040500147400ustar00rootroot00000000000000recursive-include demos *.py *.yaml *.html *.css *.js *.xml *.sql README recursive-include docs * prune docs/build include tornado/py.typed include tornado/speedups.c include tornado/test/README include tornado/test/csv_translations/fr_FR.csv include tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo include tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po include tornado/test/options_test.cfg include tornado/test/static/robots.txt include tornado/test/static/sample.xml include tornado/test/static/sample.xml.gz include tornado/test/static/sample.xml.bz2 include tornado/test/static/dir/index.html include tornado/test/static_foo.txt include tornado/test/templates/utf8.html include tornado/test/test.crt include tornado/test/test.key include LICENSE include README.rst include runtests.sh tornado-6.1.0/README.rst000066400000000000000000000030421374705040500146570ustar00rootroot00000000000000Tornado Web Server ================== .. 
image:: https://badges.gitter.im/Join%20Chat.svg
   :alt: Join the chat at https://gitter.im/tornadoweb/tornado
   :target: https://gitter.im/tornadoweb/tornado?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge

`Tornado <https://www.tornadoweb.org>`_ is a Python web framework and
asynchronous networking library, originally developed at `FriendFeed
<https://friendfeed.com>`_. By using non-blocking network I/O, Tornado
can scale to tens of thousands of open connections, making it ideal for
`long polling <https://en.wikipedia.org/wiki/Push_technology#Long_polling>`_,
`WebSockets <https://en.wikipedia.org/wiki/WebSocket>`_, and other
applications that require a long-lived connection to each user.

Hello, world
------------

Here is a simple "Hello, world" example web app for Tornado:

.. code-block:: python

    import tornado.ioloop
    import tornado.web

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello, world")

    def make_app():
        return tornado.web.Application([
            (r"/", MainHandler),
        ])

    if __name__ == "__main__":
        app = make_app()
        app.listen(8888)
        tornado.ioloop.IOLoop.current().start()

This example does not use any of Tornado's asynchronous features; for
that see this `simple chat room
<https://github.com/tornadoweb/tornado/tree/stable/demos/chat>`_.

Documentation
-------------

Documentation and links to additional resources are available at
https://www.tornadoweb.org
tornado-6.1.0/appveyor.yml000066400000000000000000000062061374705040500155650ustar00rootroot00000000000000# Appveyor is Windows CI: https://ci.appveyor.com/project/bdarnell/tornado
environment:
  global:
    TORNADO_EXTENSION: "1"

  # We only build with 3.5+ because it works out of the box, while other
  # versions require lots of machinery.
  #
  # We produce binary wheels for 32- and 64-bit builds, but because
  # the tests are so slow on Windows (6 minutes vs 15 seconds on Linux
  # or MacOS), we don't want to test the full matrix. We do full
  # tests on a couple of configurations and on the others we limit
  # the tests to the websocket module (which, because it exercises the
  # C extension module, is most likely to exhibit differences between
  # 32- and 64-bits)
  matrix:
    - PYTHON: "C:\\Python35"
      PYTHON_VERSION: "3.5.x"
      PYTHON_ARCH: "32"
      TOX_ENV: "py35"
      TOX_ARGS: ""

    - PYTHON: "C:\\Python35-x64"
      PYTHON_VERSION: "3.5.x"
      PYTHON_ARCH: "64"
      TOX_ENV: "py35"
      TOX_ARGS: "tornado.test.websocket_test"

    - PYTHON: "C:\\Python36"
      PYTHON_VERSION: "3.6.x"
      PYTHON_ARCH: "32"
      TOX_ENV: "py36"
      TOX_ARGS: "tornado.test.websocket_test"

    - PYTHON: "C:\\Python36-x64"
      PYTHON_VERSION: "3.6.x"
      PYTHON_ARCH: "64"
      TOX_ENV: "py36"
      TOX_ARGS: ""

    - PYTHON: "C:\\Python37"
      PYTHON_VERSION: "3.7.x"
      PYTHON_ARCH: "32"
      TOX_ENV: "py37"
      TOX_ARGS: "tornado.test.websocket_test"

    - PYTHON: "C:\\Python37-x64"
      PYTHON_VERSION: "3.7.x"
      PYTHON_ARCH: "64"
      TOX_ENV: "py37"
      TOX_ARGS: ""

    - PYTHON: "C:\\Python38"
      PYTHON_VERSION: "3.8.x"
      PYTHON_ARCH: "32"
      TOX_ENV: "py38"
      TOX_ARGS: "--fail-if-logs=false tornado.test.websocket_test"

    - PYTHON: "C:\\Python38-x64"
      PYTHON_VERSION: "3.8.x"
      PYTHON_ARCH: "64"
      TOX_ENV: "py38"
      # Suppress the log-cleanliness assertions because of https://bugs.python.org/issue39010
      TOX_ARGS: "--fail-if-logs=false"

install:
  # Make sure the right python version is first on the PATH.
  - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"

  # Check that we have the expected version and architecture for Python
  - "python --version"
  - "python -c \"import struct; print(struct.calcsize('P') * 8)\""

  # Upgrade to the latest version of pip to avoid it displaying warnings
  # about it being out of date.
  - "python -m pip install --disable-pip-version-check --user --upgrade pip"

  - "python -m pip install tox wheel"

build: false  # Not a C# project, build stuff at the test step instead.
test_script:
  # Build the compiled extension and run the project tests.
  # This is a bit of a hack that doesn't scale with new python versions,
  # but for now it lets us avoid duplication with .travis.yml and tox.ini.
  # Running "py3x-full" would be nice but it's failing on installing
  # dependencies with no useful logs.
  - "tox -e %TOX_ENV% -- %TOX_ARGS%"

after_test:
  # If tests are successful, create binary packages for the project.
  - "python setup.py bdist_wheel"
  - ps: "ls dist"

artifacts:
  # Archive the generated packages in the ci.appveyor.com build report.
  - path: dist\*

#on_success:
#  - TODO: upload the content of dist/*.whl to a public wheelhouse
#
tornado-6.1.0/codecov.yml000066400000000000000000000000451374705040500153350ustar00rootroot00000000000000comment: off
coverage:
  status: off
tornado-6.1.0/demos/000077500000000000000000000000001374705040500143005ustar00rootroot00000000000000tornado-6.1.0/demos/blog/000077500000000000000000000000001374705040500152235ustar00rootroot00000000000000tornado-6.1.0/demos/blog/Dockerfile000066400000000000000000000003221374705040500172120ustar00rootroot00000000000000FROM python:3.7

EXPOSE 8888

RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

COPY requirements.txt /usr/src/app/
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

ENTRYPOINT ["python3", "blog.py"]
tornado-6.1.0/demos/blog/README000066400000000000000000000040721374705040500161060ustar00rootroot00000000000000Running the Tornado Blog example app
====================================

This demo is a simple blogging engine that uses a database to store posts.
You must have PostgreSQL or CockroachDB installed to run this demo.

If you have `docker` and `docker-compose` installed, the demo and all its
prerequisites can be installed with `docker-compose up`.

1. Install a database if needed

   Consult the documentation at either https://www.postgresql.org or
   https://www.cockroachlabs.com to install one of these databases for
   your platform.

2. Install Python prerequisites

   This demo requires Python 3.5 or newer, and the packages listed in
   requirements.txt. Install them with `pip install -r requirements.txt`

3. Create a database and user for the blog.

   Connect to the database with `psql -U postgres` (for PostgreSQL) or
   `cockroach sql` (for CockroachDB).

   Create a database and user, and grant permissions:

     CREATE DATABASE blog;
     CREATE USER blog WITH PASSWORD 'blog';
     GRANT ALL ON DATABASE blog TO blog;

   (If using CockroachDB in insecure mode, omit the `WITH PASSWORD 'blog'`)

4. Create the tables in your new database (optional):

   The blog application will create its tables automatically when starting up.
   It's also possible to create them separately.

   You can use the provided schema.sql file by running this command for
   PostgreSQL:

     psql -U blog -d blog < schema.sql

   Or this one for CockroachDB:

     cockroach sql -u blog -d blog < schema.sql

   You can run the above command again later if you want to delete the
   contents of the blog and start over after testing.

5. Run the blog example

   For PostgreSQL, you can just run

     ./blog.py

   For CockroachDB, run

     ./blog.py --db_port=26257

   If you've changed anything from the defaults, use the other `--db_*` flags.

6. Visit your new blog

   Open http://localhost:8888/ in your web browser. Currently the first user
   to connect will automatically be given the ability to create and edit posts.

   Once you've created one blog post, subsequent users will not be prompted to
   sign in.
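A quick way to confirm the credentials from step 3 before starting the app is a short connectivity check. The following is a minimal sketch (a hypothetical helper, not part of the demo) that opens an aiopg pool the same way blog.py does and runs a trivial query; adjust the host, port, and user values if you changed the defaults:

    #!/usr/bin/env python3
    # check_db.py -- hypothetical helper; mirrors blog.py's --db_* defaults.
    import aiopg
    import tornado.ioloop

    async def check():
        async with aiopg.create_pool(
            host="127.0.0.1", port=5432, user="blog", password="blog", dbname="blog"
        ) as pool:
            with (await pool.cursor()) as cur:
                await cur.execute("SELECT 1")
                print(await cur.fetchone())  # (1,) means the blog user can connect

    if __name__ == "__main__":
        tornado.ioloop.IOLoop.current().run_sync(check)

If this fails with an authentication error, revisit the GRANT statements in step 3 before debugging the blog itself.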
tornado-6.1.0/demos/blog/blog.py000077500000000000000000000244221374705040500165270ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import aiopg import bcrypt import markdown import os.path import psycopg2 import re import tornado.escape import tornado.httpserver import tornado.ioloop import tornado.locks import tornado.options import tornado.web import unicodedata from tornado.options import define, options define("port", default=8888, help="run on the given port", type=int) define("db_host", default="127.0.0.1", help="blog database host") define("db_port", default=5432, help="blog database port") define("db_database", default="blog", help="blog database name") define("db_user", default="blog", help="blog database user") define("db_password", default="blog", help="blog database password") class NoResultError(Exception): pass async def maybe_create_tables(db): try: with (await db.cursor()) as cur: await cur.execute("SELECT COUNT(*) FROM entries LIMIT 1") await cur.fetchone() except psycopg2.ProgrammingError: with open("schema.sql") as f: schema = f.read() with (await db.cursor()) as cur: await cur.execute(schema) class Application(tornado.web.Application): def __init__(self, db): self.db = db handlers = [ (r"/", HomeHandler), (r"/archive", ArchiveHandler), (r"/feed", FeedHandler), (r"/entry/([^/]+)", EntryHandler), (r"/compose", ComposeHandler), (r"/auth/create", AuthCreateHandler), (r"/auth/login", AuthLoginHandler), (r"/auth/logout", AuthLogoutHandler), ] settings = dict( blog_title=u"Tornado Blog", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), ui_modules={"Entry": EntryModule}, xsrf_cookies=True, cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", login_url="/auth/login", debug=True, ) super().__init__(handlers, **settings) class BaseHandler(tornado.web.RequestHandler): def row_to_obj(self, row, cur): """Convert a SQL row to an object supporting dict and attribute access.""" obj = tornado.util.ObjectDict() for val, desc in zip(row, cur.description): obj[desc.name] = val return obj async def execute(self, stmt, *args): """Execute a SQL statement. Must be called with ``await self.execute(...)`` """ with (await self.application.db.cursor()) as cur: await cur.execute(stmt, args) async def query(self, stmt, *args): """Query for a list of results. Typical usage:: results = await self.query(...) Or:: for row in await self.query(...) """ with (await self.application.db.cursor()) as cur: await cur.execute(stmt, args) return [self.row_to_obj(row, cur) for row in await cur.fetchall()] async def queryone(self, stmt, *args): """Query for exactly one result. Raises NoResultError if there are no results, or ValueError if there are more than one. 
""" results = await self.query(stmt, *args) if len(results) == 0: raise NoResultError() elif len(results) > 1: raise ValueError("Expected 1 result, got %d" % len(results)) return results[0] async def prepare(self): # get_current_user cannot be a coroutine, so set # self.current_user in prepare instead. user_id = self.get_secure_cookie("blogdemo_user") if user_id: self.current_user = await self.queryone( "SELECT * FROM authors WHERE id = %s", int(user_id) ) async def any_author_exists(self): return bool(await self.query("SELECT * FROM authors LIMIT 1")) class HomeHandler(BaseHandler): async def get(self): entries = await self.query( "SELECT * FROM entries ORDER BY published DESC LIMIT 5" ) if not entries: self.redirect("/compose") return self.render("home.html", entries=entries) class EntryHandler(BaseHandler): async def get(self, slug): entry = await self.queryone("SELECT * FROM entries WHERE slug = %s", slug) if not entry: raise tornado.web.HTTPError(404) self.render("entry.html", entry=entry) class ArchiveHandler(BaseHandler): async def get(self): entries = await self.query("SELECT * FROM entries ORDER BY published DESC") self.render("archive.html", entries=entries) class FeedHandler(BaseHandler): async def get(self): entries = await self.query( "SELECT * FROM entries ORDER BY published DESC LIMIT 10" ) self.set_header("Content-Type", "application/atom+xml") self.render("feed.xml", entries=entries) class ComposeHandler(BaseHandler): @tornado.web.authenticated async def get(self): id = self.get_argument("id", None) entry = None if id: entry = await self.queryone("SELECT * FROM entries WHERE id = %s", int(id)) self.render("compose.html", entry=entry) @tornado.web.authenticated async def post(self): id = self.get_argument("id", None) title = self.get_argument("title") text = self.get_argument("markdown") html = markdown.markdown(text) if id: try: entry = await self.queryone( "SELECT * FROM entries WHERE id = %s", int(id) ) except NoResultError: raise tornado.web.HTTPError(404) slug = entry.slug await self.execute( "UPDATE entries SET title = %s, markdown = %s, html = %s " "WHERE id = %s", title, text, html, int(id), ) else: slug = unicodedata.normalize("NFKD", title) slug = re.sub(r"[^\w]+", " ", slug) slug = "-".join(slug.lower().strip().split()) slug = slug.encode("ascii", "ignore").decode("ascii") if not slug: slug = "entry" while True: e = await self.query("SELECT * FROM entries WHERE slug = %s", slug) if not e: break slug += "-2" await self.execute( "INSERT INTO entries (author_id,title,slug,markdown,html,published,updated)" "VALUES (%s,%s,%s,%s,%s,CURRENT_TIMESTAMP,CURRENT_TIMESTAMP)", self.current_user.id, title, slug, text, html, ) self.redirect("/entry/" + slug) class AuthCreateHandler(BaseHandler): def get(self): self.render("create_author.html") async def post(self): if await self.any_author_exists(): raise tornado.web.HTTPError(400, "author already created") hashed_password = await tornado.ioloop.IOLoop.current().run_in_executor( None, bcrypt.hashpw, tornado.escape.utf8(self.get_argument("password")), bcrypt.gensalt(), ) author = await self.queryone( "INSERT INTO authors (email, name, hashed_password) " "VALUES (%s, %s, %s) RETURNING id", self.get_argument("email"), self.get_argument("name"), tornado.escape.to_unicode(hashed_password), ) self.set_secure_cookie("blogdemo_user", str(author.id)) self.redirect(self.get_argument("next", "/")) class AuthLoginHandler(BaseHandler): async def get(self): # If there are no authors, redirect to the account creation page. 
        if not await self.any_author_exists():
            self.redirect("/auth/create")
        else:
            self.render("login.html", error=None)

    async def post(self):
        try:
            author = await self.queryone(
                "SELECT * FROM authors WHERE email = %s", self.get_argument("email")
            )
        except NoResultError:
            self.render("login.html", error="email not found")
            return
        password_equal = await tornado.ioloop.IOLoop.current().run_in_executor(
            None,
            bcrypt.checkpw,
            tornado.escape.utf8(self.get_argument("password")),
            tornado.escape.utf8(author.hashed_password),
        )
        if password_equal:
            self.set_secure_cookie("blogdemo_user", str(author.id))
            self.redirect(self.get_argument("next", "/"))
        else:
            self.render("login.html", error="incorrect password")


class AuthLogoutHandler(BaseHandler):
    def get(self):
        self.clear_cookie("blogdemo_user")
        self.redirect(self.get_argument("next", "/"))


class EntryModule(tornado.web.UIModule):
    def render(self, entry):
        return self.render_string("modules/entry.html", entry=entry)


async def main():
    tornado.options.parse_command_line()

    # Create the global connection pool.
    async with aiopg.create_pool(
        host=options.db_host,
        port=options.db_port,
        user=options.db_user,
        password=options.db_password,
        dbname=options.db_database,
    ) as db:
        await maybe_create_tables(db)
        app = Application(db)
        app.listen(options.port)

        # In this demo the server will simply run until interrupted
        # with Ctrl-C, but if you want to shut down more gracefully,
        # call shutdown_event.set().
        shutdown_event = tornado.locks.Event()
        await shutdown_event.wait()


if __name__ == "__main__":
    tornado.ioloop.IOLoop.current().run_sync(main)
tornado-6.1.0/demos/blog/docker-compose.yml000066400000000000000000000003621374705040500206610ustar00rootroot00000000000000postgres:
  image: postgres:10.3
  environment:
    POSTGRES_USER: blog
    POSTGRES_PASSWORD: blog
    POSTGRES_DB: blog
  ports:
    - "5432"
blog:
  build: .
  links:
    - postgres
  ports:
    - "8888:8888"
  command: --db_host=postgres
tornado-6.1.0/demos/blog/requirements.txt000066400000000000000000000000471374705040500205100ustar00rootroot00000000000000aiopg
bcrypt
markdown
psycopg2
tornado
tornado-6.1.0/demos/blog/schema.sql000066400000000000000000000025121374705040500172040ustar00rootroot00000000000000-- Copyright 2009 FriendFeed
--
-- Licensed under the Apache License, Version 2.0 (the "License"); you may
-- not use this file except in compliance with the License. You may obtain
-- a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-- License for the specific language governing permissions and limitations
-- under the License.
-- To create the database: -- CREATE DATABASE blog; -- CREATE USER blog WITH PASSWORD 'blog'; -- GRANT ALL ON DATABASE blog TO blog; -- -- To reload the tables: -- psql -U blog -d blog < schema.sql DROP TABLE IF EXISTS authors; CREATE TABLE authors ( id SERIAL PRIMARY KEY, email VARCHAR(100) NOT NULL UNIQUE, name VARCHAR(100) NOT NULL, hashed_password VARCHAR(100) NOT NULL ); DROP TABLE IF EXISTS entries; CREATE TABLE entries ( id SERIAL PRIMARY KEY, author_id INT NOT NULL REFERENCES authors(id), slug VARCHAR(100) NOT NULL UNIQUE, title VARCHAR(512) NOT NULL, markdown TEXT NOT NULL, html TEXT NOT NULL, published TIMESTAMP NOT NULL, updated TIMESTAMP NOT NULL ); CREATE INDEX ON entries (published); tornado-6.1.0/demos/blog/static/000077500000000000000000000000001374705040500165125ustar00rootroot00000000000000tornado-6.1.0/demos/blog/static/blog.css000066400000000000000000000041221374705040500201460ustar00rootroot00000000000000/* * Copyright 2009 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ body { background: white; color: black; margin: 15px; margin-top: 0; } body, input, textarea { font-family: Georgia, serif; font-size: 12pt; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } h1, h2, h3, h4 { font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; margin: 0; } h1 { font-size: 20pt; } pre, code { font-family: monospace; color: #060; } pre { margin-left: 1em; padding-left: 1em; border-left: 1px solid silver; line-height: 14pt; } a, a code { color: #00c; } #body { max-width: 800px; margin: auto; } #header { background-color: #3b5998; padding: 5px; padding-left: 10px; padding-right: 10px; margin-bottom: 1em; } #header, #header a { color: white; } #header h1 a { text-decoration: none; } #footer, #content { margin-left: 10px; margin-right: 10px; } #footer { margin-top: 3em; } .entry h1 a { color: black; text-decoration: none; } .entry { margin-bottom: 2em; } .entry .date { margin-top: 3px; } .entry p { margin: 0; margin-bottom: 1em; } .entry .body { margin-top: 1em; line-height: 16pt; } .compose td { vertical-align: middle; padding-bottom: 5px; } .compose td.field { padding-right: 10px; } .compose .title, .compose .submit { font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; font-weight: bold; } .compose .title { font-size: 20pt; } .compose .title, .compose .markdown { width: 100%; } .compose .markdown { height: 500px; line-height: 16pt; } tornado-6.1.0/demos/blog/templates/000077500000000000000000000000001374705040500172215ustar00rootroot00000000000000tornado-6.1.0/demos/blog/templates/archive.html000066400000000000000000000012411374705040500215260ustar00rootroot00000000000000{% extends "base.html" %} {% block head %} {% end %} {% block body %}
    {% for entry in entries %}
  • {{ locale.format_date(entry.published, full_format=True, shorter=True) }}
  • {% end %}
{% end %} tornado-6.1.0/demos/blog/templates/base.html000066400000000000000000000020041374705040500210150ustar00rootroot00000000000000 {{ escape(handler.settings["blog_title"]) }} {% block head %}{% end %}
{% block body %}{% end %}
{% block bottom %}{% end %} tornado-6.1.0/demos/blog/templates/compose.html000066400000000000000000000030151374705040500215530ustar00rootroot00000000000000{% extends "base.html" %} {% block body %}
{% if entry %} {% end %} {% module xsrf_form_html() %}
{% end %} {% block bottom %} {% end %} tornado-6.1.0/demos/blog/templates/create_author.html000066400000000000000000000004631374705040500227370ustar00rootroot00000000000000{% extends "base.html" %} {% block body %}
Email:
Name:
Password:
{% module xsrf_form_html() %}
{% end %} tornado-6.1.0/demos/blog/templates/entry.html000066400000000000000000000001221374705040500212430ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% module Entry(entry) %} {% end %} tornado-6.1.0/demos/blog/templates/feed.xml000066400000000000000000000025011374705040500206440ustar00rootroot00000000000000 {% set date_format = "%Y-%m-%dT%H:%M:%SZ" %} {{ handler.settings["blog_title"] }} {% if len(entries) > 0 %} {{ max(e.updated for e in entries).strftime(date_format) }} {% else %} {{ datetime.datetime.utcnow().strftime(date_format) }} {% end %} http://{{ request.host }}/ {{ handler.settings["blog_title"] }} {% for entry in entries %} http://{{ request.host }}/entry/{{ entry.slug }} {{ entry.title }} {{ entry.updated.strftime(date_format) }} {{ entry.published.strftime(date_format) }}
{% raw entry.html %}
{% end %}
tornado-6.1.0/demos/blog/templates/home.html000066400000000000000000000002641374705040500210410ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% for entry in entries %} {% module Entry(entry) %} {% end %} {% end %} tornado-6.1.0/demos/blog/templates/login.html000066400000000000000000000005261374705040500212220ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% if error %} Error: {{ error }}

{% end %}

Email:
Password:
{% module xsrf_form_html() %}
{% end %} tornado-6.1.0/demos/blog/templates/modules/000077500000000000000000000000001374705040500206715ustar00rootroot00000000000000tornado-6.1.0/demos/blog/templates/modules/entry.html000066400000000000000000000005651374705040500227260ustar00rootroot00000000000000

{{ entry.title }}

{{ locale.format_date(entry.published, full_format=True, shorter=True) }}
{% raw entry.html %}
{% if current_user %} {% end %}
tornado-6.1.0/demos/chat/000077500000000000000000000000001374705040500152175ustar00rootroot00000000000000tornado-6.1.0/demos/chat/chatdemo.py000077500000000000000000000101251374705040500173570ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import asyncio import tornado.escape import tornado.ioloop import tornado.locks import tornado.web import os.path import uuid from tornado.options import define, options, parse_command_line define("port", default=8888, help="run on the given port", type=int) define("debug", default=True, help="run in debug mode") class MessageBuffer(object): def __init__(self): # cond is notified whenever the message cache is updated self.cond = tornado.locks.Condition() self.cache = [] self.cache_size = 200 def get_messages_since(self, cursor): """Returns a list of messages newer than the given cursor. ``cursor`` should be the ``id`` of the last message received. """ results = [] for msg in reversed(self.cache): if msg["id"] == cursor: break results.append(msg) results.reverse() return results def add_message(self, message): self.cache.append(message) if len(self.cache) > self.cache_size: self.cache = self.cache[-self.cache_size :] self.cond.notify_all() # Making this a non-singleton is left as an exercise for the reader. global_message_buffer = MessageBuffer() class MainHandler(tornado.web.RequestHandler): def get(self): self.render("index.html", messages=global_message_buffer.cache) class MessageNewHandler(tornado.web.RequestHandler): """Post a new message to the chat room.""" def post(self): message = {"id": str(uuid.uuid4()), "body": self.get_argument("body")} # render_string() returns a byte string, which is not supported # in json, so we must convert it to a character string. message["html"] = tornado.escape.to_unicode( self.render_string("message.html", message=message) ) if self.get_argument("next", None): self.redirect(self.get_argument("next")) else: self.write(message) global_message_buffer.add_message(message) class MessageUpdatesHandler(tornado.web.RequestHandler): """Long-polling request for new messages. Waits until new messages are available before returning anything. """ async def post(self): cursor = self.get_argument("cursor", None) messages = global_message_buffer.get_messages_since(cursor) while not messages: # Save the Future returned here so we can cancel it in # on_connection_close. 
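            # (cond.wait() hands back a plain Future; cancelling it in
            # on_connection_close lets an abandoned long-poll exit promptly
            # instead of lingering in the Condition's waiter list until the
            # next notify_all().)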
self.wait_future = global_message_buffer.cond.wait() try: await self.wait_future except asyncio.CancelledError: return messages = global_message_buffer.get_messages_since(cursor) if self.request.connection.stream.closed(): return self.write(dict(messages=messages)) def on_connection_close(self): self.wait_future.cancel() def main(): parse_command_line() app = tornado.web.Application( [ (r"/", MainHandler), (r"/a/message/new", MessageNewHandler), (r"/a/message/updates", MessageUpdatesHandler), ], cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), xsrf_cookies=True, debug=options.debug, ) app.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-6.1.0/demos/chat/static/000077500000000000000000000000001374705040500165065ustar00rootroot00000000000000tornado-6.1.0/demos/chat/static/chat.css000066400000000000000000000017351374705040500201450ustar00rootroot00000000000000/* * Copyright 2009 FriendFeed * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ body { background: white; margin: 10px; } body, input { font-family: sans-serif; font-size: 10pt; color: black; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } #body { position: absolute; bottom: 10px; left: 10px; } #input { margin-top: 0.5em; } #inbox .message { padding-top: 0.25em; } #nav { float: right; z-index: 99; } tornado-6.1.0/demos/chat/static/chat.js000066400000000000000000000077011374705040500177700ustar00rootroot00000000000000// Copyright 2009 FriendFeed // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. $(document).ready(function() { if (!window.console) window.console = {}; if (!window.console.log) window.console.log = function() {}; $("#messageform").on("submit", function() { newMessage($(this)); return false; }); $("#messageform").on("keypress", function(e) { if (e.keyCode == 13) { newMessage($(this)); return false; } return true; }); $("#message").select(); updater.poll(); }); function newMessage(form) { var message = form.formToDict(); var disabled = form.find("input[type=submit]"); disabled.disable(); $.postJSON("/a/message/new", message, function(response) { updater.showMessage(response); if (message.id) { form.parent().remove(); } else { form.find("input[type=text]").val("").select(); disabled.enable(); } }); } function getCookie(name) { var r = document.cookie.match("\\b" + name + "=([^;]*)\\b"); return r ? 
r[1] : undefined; } jQuery.postJSON = function(url, args, callback) { args._xsrf = getCookie("_xsrf"); $.ajax({url: url, data: $.param(args), dataType: "text", type: "POST", success: function(response) { if (callback) callback(eval("(" + response + ")")); }, error: function(response) { console.log("ERROR:", response); }}); }; jQuery.fn.formToDict = function() { var fields = this.serializeArray(); var json = {}; for (var i = 0; i < fields.length; i++) { json[fields[i].name] = fields[i].value; } if (json.next) delete json.next; return json; }; jQuery.fn.disable = function() { this.enable(false); return this; }; jQuery.fn.enable = function(opt_enable) { if (arguments.length && !opt_enable) { this.attr("disabled", "disabled"); } else { this.removeAttr("disabled"); } return this; }; var updater = { errorSleepTime: 500, cursor: null, poll: function() { var args = {"_xsrf": getCookie("_xsrf")}; if (updater.cursor) args.cursor = updater.cursor; $.ajax({url: "/a/message/updates", type: "POST", dataType: "text", data: $.param(args), success: updater.onSuccess, error: updater.onError}); }, onSuccess: function(response) { try { updater.newMessages(eval("(" + response + ")")); } catch (e) { updater.onError(); return; } updater.errorSleepTime = 500; window.setTimeout(updater.poll, 0); }, onError: function(response) { updater.errorSleepTime *= 2; console.log("Poll error; sleeping for", updater.errorSleepTime, "ms"); window.setTimeout(updater.poll, updater.errorSleepTime); }, newMessages: function(response) { if (!response.messages) return; var messages = response.messages; updater.cursor = messages[messages.length - 1].id; console.log(messages.length, "new messages, cursor:", updater.cursor); for (var i = 0; i < messages.length; i++) { updater.showMessage(messages[i]); } }, showMessage: function(message) { var existing = $("#m" + message.id); if (existing.length > 0) return; var node = $(message.html); node.hide(); $("#inbox").append(node); node.slideDown(); } }; tornado-6.1.0/demos/chat/templates/000077500000000000000000000000001374705040500172155ustar00rootroot00000000000000tornado-6.1.0/demos/chat/templates/index.html000066400000000000000000000021471374705040500212160ustar00rootroot00000000000000 Tornado Chat Demo
{% for message in messages %} {% module Template("message.html", message=message) %} {% end %}
{% module xsrf_form_html() %}
tornado-6.1.0/demos/chat/templates/message.html000066400000000000000000000001331374705040500215240ustar00rootroot00000000000000
<div class="message" id="m{{ message["id"] }}">{% module linkify(message["body"]) %}</div>
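The long-polling in chatdemo.py above hinges on tornado.locks.Condition: add_message() calls notify_all(), and each pending update request awaits wait(). Here is a stripped-down, standalone sketch of that pattern (illustrative only, not part of the demo):

    import asyncio

    import tornado.ioloop
    import tornado.locks

    cond = tornado.locks.Condition()
    cache = []

    async def poster():
        await asyncio.sleep(0.1)
        cache.append({"id": "1", "body": "hello"})
        cond.notify_all()  # wake every waiting poller

    async def poller():
        while not cache:
            wait_future = cond.wait()  # keep a handle so it can be cancelled
            try:
                await wait_future
            except asyncio.CancelledError:
                return
        print("got:", cache)

    async def main():
        await asyncio.gather(poster(), poller())

    if __name__ == "__main__":
        tornado.ioloop.IOLoop.current().run_sync(main)

Because Condition.wait() returns a Future rather than requiring a held lock, the waiting side stays cancellable, which is exactly what MessageUpdatesHandler.on_connection_close relies on.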
tornado-6.1.0/demos/facebook/000077500000000000000000000000001374705040500160515ustar00rootroot00000000000000tornado-6.1.0/demos/facebook/README000066400000000000000000000006171374705040500167350ustar00rootroot00000000000000Running the Tornado Facebook example
====================================

To run this example, you must register a Facebook application with a
Connect URL set to the domain that this demo will be running on
(i.e. http://localhost:8888/ by default).

The API key and secret for this application must be passed on the
command line:

  python facebook.py --facebook_api_key=ABC --facebook_secret=XYZ
tornado-6.1.0/demos/facebook/facebook.py000077500000000000000000000102201374705040500201720ustar00rootroot00000000000000#!/usr/bin/env python3
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os.path
import tornado.auth
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web

from tornado.options import define, options

define("port", default=8888, help="run on the given port", type=int)
define("facebook_api_key", help="your Facebook application API key", type=str)
define("facebook_secret", help="your Facebook application secret", type=str)


class Application(tornado.web.Application):
    def __init__(self):
        handlers = [
            (r"/", MainHandler),
            (r"/auth/login", AuthLoginHandler),
            (r"/auth/logout", AuthLogoutHandler),
        ]
        settings = dict(
            cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
            login_url="/auth/login",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            facebook_api_key=options.facebook_api_key,
            facebook_secret=options.facebook_secret,
            ui_modules={"Post": PostModule},
            debug=True,
            autoescape=None,
        )
        tornado.web.Application.__init__(self, handlers, **settings)


class BaseHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        user_json = self.get_secure_cookie("fbdemo_user")
        if not user_json:
            return None
        return tornado.escape.json_decode(user_json)


class MainHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    @tornado.web.authenticated
    async def get(self):
        stream = await self.facebook_request(
            "/me/home", access_token=self.current_user["access_token"]
        )
        if stream is None:
            # Session may have expired
            self.redirect("/auth/login")
            return
        self.render("stream.html", stream=stream)


class AuthLoginHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    async def get(self):
        my_url = (
            self.request.protocol
            + "://"
            + self.request.host
            + "/auth/login?next="
            + tornado.escape.url_escape(self.get_argument("next", "/"))
        )
        if self.get_argument("code", False):
            user = await self.get_authenticated_user(
                redirect_uri=my_url,
                client_id=self.settings["facebook_api_key"],
                client_secret=self.settings["facebook_secret"],
                code=self.get_argument("code"),
            )
            self.set_secure_cookie("fbdemo_user", tornado.escape.json_encode(user))
            self.redirect(self.get_argument("next", "/"))
            return
        self.authorize_redirect(
redirect_uri=my_url, client_id=self.settings["facebook_api_key"], extra_params={"scope": "user_posts"}, ) class AuthLogoutHandler(BaseHandler, tornado.auth.FacebookGraphMixin): def get(self): self.clear_cookie("fbdemo_user") self.redirect(self.get_argument("next", "/")) class PostModule(tornado.web.UIModule): def render(self, post): return self.render_string("modules/post.html", post=post) def main(): tornado.options.parse_command_line() if not (options.facebook_api_key and options.facebook_secret): print("--facebook_api_key and --facebook_secret must be set") return http_server = tornado.httpserver.HTTPServer(Application()) http_server.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-6.1.0/demos/facebook/static/000077500000000000000000000000001374705040500173405ustar00rootroot00000000000000tornado-6.1.0/demos/facebook/static/facebook.css000066400000000000000000000027241374705040500216300ustar00rootroot00000000000000/* * Copyright 2009 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ body { background: white; color: black; margin: 15px; } body, input, textarea { font-family: "Lucida Grande", Tahoma, Verdana, sans-serif; font-size: 10pt; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } img { border: 0; } a { text-decoration: none; color: #3b5998; } a:hover { text-decoration: underline; } .post { border-bottom: 1px solid #eeeeee; min-height: 50px; padding-bottom: 10px; margin-top: 10px; } .post .picture { float: left; } .post .picture img { height: 50px; width: 50px; } .post .body { margin-left: 60px; } .post .media img { border: 1px solid #cccccc; padding: 3px; } .post .media:hover img { border: 1px solid #3b5998; } .post a.actor { font-weight: bold; } .post .meta { font-size: 11px; } .post a.permalink { color: #777777; } #body { max-width: 700px; margin: auto; } tornado-6.1.0/demos/facebook/static/facebook.js000066400000000000000000000000001374705040500214350ustar00rootroot00000000000000tornado-6.1.0/demos/facebook/templates/000077500000000000000000000000001374705040500200475ustar00rootroot00000000000000tornado-6.1.0/demos/facebook/templates/modules/000077500000000000000000000000001374705040500215175ustar00rootroot00000000000000tornado-6.1.0/demos/facebook/templates/modules/post.html000066400000000000000000000013761374705040500234010ustar00rootroot00000000000000
{% set author_url="http://www.facebook.com/profile.php?id=" + escape(post["from"]["id"]) %}
{{ escape(post["from"]["name"]) }} {% if "message" in post %} {{ escape(post["message"]) }} {% end %}
tornado-6.1.0/demos/facebook/templates/stream.html000066400000000000000000000011631374705040500222310ustar00rootroot00000000000000 Tornado Facebook Stream Demo
{{ escape(current_user["name"]) }} - {{ _("Sign out") }}
{% for post in stream["data"] %} {{ modules.Post(post) }} {% end %}
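The facebook demo above only reads the user's stream. FacebookGraphMixin can also write through the Graph API via post_args; the following hypothetical handler is a sketch of that pattern (it assumes it is added to facebook.py next to BaseHandler, registered in the Application's handler list, and that your Facebook app has been granted the necessary publish permissions):

    class WallPostHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
        @tornado.web.authenticated
        async def post(self):
            # Supplying post_args makes facebook_request issue an HTTP POST.
            new_entry = await self.facebook_request(
                "/me/feed",
                post_args={"message": self.get_argument("message")},
                access_token=self.current_user["access_token"],
            )
            if not new_entry:
                # The call failed; the token may have expired.
                self.redirect("/auth/login")
                return
            self.redirect("/")

Registering it would take one more route, e.g. (r"/post", WallPostHandler), alongside the existing handlers.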
tornado-6.1.0/demos/file_upload/000077500000000000000000000000001374705040500165635ustar00rootroot00000000000000tornado-6.1.0/demos/file_upload/file_receiver.py000077500000000000000000000031631374705040500217460ustar00rootroot00000000000000#!/usr/bin/env python """Usage: python file_receiver.py Demonstrates a server that receives a multipart-form-encoded set of files in an HTTP POST, or streams in the raw data of a single file in an HTTP PUT. See file_uploader.py in this directory for code that uploads files in this format. """ import logging try: from urllib.parse import unquote except ImportError: # Python 2. from urllib import unquote import tornado.ioloop import tornado.web from tornado import options class POSTHandler(tornado.web.RequestHandler): def post(self): for field_name, files in self.request.files.items(): for info in files: filename, content_type = info["filename"], info["content_type"] body = info["body"] logging.info( 'POST "%s" "%s" %d bytes', filename, content_type, len(body) ) self.write("OK") @tornado.web.stream_request_body class PUTHandler(tornado.web.RequestHandler): def initialize(self): self.bytes_read = 0 def data_received(self, chunk): self.bytes_read += len(chunk) def put(self, filename): filename = unquote(filename) mtype = self.request.headers.get("Content-Type") logging.info('PUT "%s" "%s" %d bytes', filename, mtype, self.bytes_read) self.write("OK") def make_app(): return tornado.web.Application([(r"/post", POSTHandler), (r"/(.*)", PUTHandler)]) if __name__ == "__main__": # Tornado configures logging. options.parse_command_line() app = make_app() app.listen(8888) tornado.ioloop.IOLoop.current().start() tornado-6.1.0/demos/file_upload/file_uploader.py000077500000000000000000000067061374705040500217630ustar00rootroot00000000000000#!/usr/bin/env python """Usage: python file_uploader.py [--put] file1.txt file2.png ... Demonstrates uploading files to a server, without concurrency. It can either POST a multipart-form-encoded request containing one or more files, or PUT a single file without encoding. See also file_receiver.py in this directory, a server that receives uploads. """ import mimetypes import os import sys from functools import partial from uuid import uuid4 try: from urllib.parse import quote except ImportError: # Python 2. from urllib import quote from tornado import gen, httpclient, ioloop from tornado.options import define, options # Using HTTP POST, upload one or more files in a single multipart-form-encoded # request. @gen.coroutine def multipart_producer(boundary, filenames, write): boundary_bytes = boundary.encode() for filename in filenames: filename_bytes = filename.encode() mtype = mimetypes.guess_type(filename)[0] or "application/octet-stream" buf = ( (b"--%s\r\n" % boundary_bytes) + ( b'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (filename_bytes, filename_bytes) ) + (b"Content-Type: %s\r\n" % mtype.encode()) + b"\r\n" ) yield write(buf) with open(filename, "rb") as f: while True: # 16k at a time. chunk = f.read(16 * 1024) if not chunk: break yield write(chunk) yield write(b"\r\n") yield write(b"--%s--\r\n" % (boundary_bytes,)) # Using HTTP PUT, upload one raw file. This is preferred for large files since # the server can stream the data instead of buffering it entirely in memory. 
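# (post() below, together with multipart_producer above, implements the
# multipart POST path; the raw PUT path is raw_producer and put further down.)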
@gen.coroutine def post(filenames): client = httpclient.AsyncHTTPClient() boundary = uuid4().hex headers = {"Content-Type": "multipart/form-data; boundary=%s" % boundary} producer = partial(multipart_producer, boundary, filenames) response = yield client.fetch( "http://localhost:8888/post", method="POST", headers=headers, body_producer=producer, ) print(response) @gen.coroutine def raw_producer(filename, write): with open(filename, "rb") as f: while True: # 16K at a time. chunk = f.read(16 * 1024) if not chunk: # Complete. break yield write(chunk) @gen.coroutine def put(filenames): client = httpclient.AsyncHTTPClient() for filename in filenames: mtype = mimetypes.guess_type(filename)[0] or "application/octet-stream" headers = {"Content-Type": mtype} producer = partial(raw_producer, filename) url_path = quote(os.path.basename(filename)) response = yield client.fetch( "http://localhost:8888/%s" % url_path, method="PUT", headers=headers, body_producer=producer, ) print(response) if __name__ == "__main__": define("put", type=bool, help="Use PUT instead of POST", group="file uploader") # Tornado configures logging from command line opts and returns remaining args. filenames = options.parse_command_line() if not filenames: print("Provide a list of filenames to upload.", file=sys.stderr) sys.exit(1) method = put if options.put else post ioloop.IOLoop.current().run_sync(lambda: method(filenames)) tornado-6.1.0/demos/helloworld/000077500000000000000000000000001374705040500164535ustar00rootroot00000000000000tornado-6.1.0/demos/helloworld/helloworld.py000077500000000000000000000022671374705040500212120ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web from tornado.options import define, options define("port", default=8888, help="run on the given port", type=int) class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") def main(): tornado.options.parse_command_line() application = tornado.web.Application([(r"/", MainHandler)]) http_server = tornado.httpserver.HTTPServer(application) http_server.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-6.1.0/demos/s3server/000077500000000000000000000000001374705040500160545ustar00rootroot00000000000000tornado-6.1.0/demos/s3server/s3server.py000066400000000000000000000234341374705040500202100ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Implementation of an S3-like storage server based on local files. Useful to test features that will eventually run on S3, or if you want to run something locally that was once running on S3. It doesn't support all the features of S3, but it works with the standard S3 client for the most basic semantics. To use the standard S3 client with this module: c = S3.AWSAuthConnection("", "", server="localhost", port=8888, is_secure=False) c.create_bucket("mybucket") c.put("mybucket", "mykey", "a value") print(c.get("mybucket", "mykey").body) """ import bisect import datetime import hashlib import os import os.path import urllib.parse from tornado import escape from tornado import httpserver from tornado import ioloop from tornado import web from tornado.util import unicode_type from tornado.options import options, define define("port", default=9888, help="TCP port to listen on") define("root_directory", default="/tmp/s3", help="Root storage directory") define("bucket_depth", default=0, help="Bucket file system depth limit") def start(port, root_directory, bucket_depth): """Starts the mock S3 server on the given port at the given path.""" application = S3Application(root_directory, bucket_depth) http_server = httpserver.HTTPServer(application) http_server.listen(port) ioloop.IOLoop.current().start() class S3Application(web.Application): """Implementation of an S3-like storage server based on local files. If bucket depth is given, we break files up into multiple directories to prevent hitting file system limits for the number of files in each directory. 1 means one level of directories, 2 means 2, etc. """ def __init__(self, root_directory, bucket_depth=0): web.Application.__init__( self, [ (r"/", RootHandler), (r"/([^/]+)/(.+)", ObjectHandler), (r"/([^/]+)/", BucketHandler), ], ) self.directory = os.path.abspath(root_directory) if not os.path.exists(self.directory): os.makedirs(self.directory) self.bucket_depth = bucket_depth class BaseRequestHandler(web.RequestHandler): SUPPORTED_METHODS = ("PUT", "GET", "DELETE") def render_xml(self, value): assert isinstance(value, dict) and len(value) == 1 self.set_header("Content-Type", "application/xml; charset=UTF-8") name = list(value.keys())[0] parts = [] parts.append("<" + name + ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') self._render_parts(value[name], parts) parts.append("</" + name + ">") self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' + "".join(parts)) def _render_parts(self, value, parts=None): if parts is None: parts = [] if isinstance(value, (unicode_type, bytes)): parts.append(escape.xhtml_escape(value)) elif isinstance(value, int): parts.append(str(value)) elif isinstance(value, datetime.datetime): parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) elif isinstance(value, dict): for name, subvalue in value.items(): if not isinstance(subvalue, list): subvalue = [subvalue] for subsubvalue in subvalue: parts.append("<" + name + ">") self._render_parts(subsubvalue, parts) parts.append("</" + name + ">") else: raise Exception("Unknown S3 value type %r", value) def _object_path(self, bucket, object_name): if self.application.bucket_depth < 1: return os.path.abspath( os.path.join(self.application.directory, bucket, object_name) ) hash = hashlib.md5(object_name.encode("utf-8")).hexdigest() path = os.path.abspath(os.path.join(self.application.directory, bucket)) for i in range(self.application.bucket_depth): path = os.path.join(path, hash[: 2 * (i + 1)]) return os.path.join(path, object_name)
class RootHandler(BaseRequestHandler): def get(self): names = os.listdir(self.application.directory) buckets = [] for name in names: path = os.path.join(self.application.directory, name) info = os.stat(path) buckets.append( { "Name": name, "CreationDate": datetime.datetime.utcfromtimestamp(info.st_ctime), } ) self.render_xml({"ListAllMyBucketsResult": {"Buckets": {"Bucket": buckets}}}) class BucketHandler(BaseRequestHandler): def get(self, bucket_name): prefix = self.get_argument("prefix", u"") marker = self.get_argument("marker", u"") max_keys = int(self.get_argument("max-keys", 50000)) path = os.path.abspath(os.path.join(self.application.directory, bucket_name)) terse = int(self.get_argument("terse", 0)) if not path.startswith(self.application.directory) or not os.path.isdir(path): raise web.HTTPError(404) object_names = [] for root, dirs, files in os.walk(path): for file_name in files: object_names.append(os.path.join(root, file_name)) skip = len(path) + 1 for i in range(self.application.bucket_depth): skip += 2 * (i + 1) + 1 object_names = [n[skip:] for n in object_names] object_names.sort() contents = [] start_pos = 0 if marker: start_pos = bisect.bisect_right(object_names, marker, start_pos) if prefix: start_pos = bisect.bisect_left(object_names, prefix, start_pos) truncated = False for object_name in object_names[start_pos:]: if not object_name.startswith(prefix): break if len(contents) >= max_keys: truncated = True break object_path = self._object_path(bucket_name, object_name) c = {"Key": object_name} if not terse: info = os.stat(object_path) c.update( { "LastModified": datetime.datetime.utcfromtimestamp( info.st_mtime ), "Size": info.st_size, } ) contents.append(c) marker = object_name self.render_xml( { "ListBucketResult": { "Name": bucket_name, "Prefix": prefix, "Marker": marker, "MaxKeys": max_keys, "IsTruncated": truncated, "Contents": contents, } } ) def put(self, bucket_name): path = os.path.abspath(os.path.join(self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or os.path.exists(path): raise web.HTTPError(403) os.makedirs(path) self.finish() def delete(self, bucket_name): path = os.path.abspath(os.path.join(self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or not os.path.isdir(path): raise web.HTTPError(404) if len(os.listdir(path)) > 0: raise web.HTTPError(403) os.rmdir(path) self.set_status(204) self.finish() class ObjectHandler(BaseRequestHandler): def get(self, bucket, object_name): object_name = urllib.parse.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or not os.path.isfile(path): raise web.HTTPError(404) info = os.stat(path) self.set_header("Content-Type", "application/unknown") self.set_header( "Last-Modified", datetime.datetime.utcfromtimestamp(info.st_mtime) ) with open(path, "rb") as object_file: self.finish(object_file.read()) def put(self, bucket, object_name): object_name = urllib.parse.unquote(object_name) bucket_dir = os.path.abspath(os.path.join(self.application.directory, bucket)) if not bucket_dir.startswith(self.application.directory) or not os.path.isdir( bucket_dir ): raise web.HTTPError(404) path = self._object_path(bucket, object_name) if not path.startswith(bucket_dir) or os.path.isdir(path): raise web.HTTPError(403) directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) with open(path, "wb") as object_file: object_file.write(self.request.body) self.finish()
def delete(self, bucket, object_name): object_name = urllib.parse.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or not os.path.isfile(path): raise web.HTTPError(404) os.unlink(path) self.set_status(204) self.finish() if __name__ == "__main__": options.parse_command_line() start(options.port, options.root_directory, options.bucket_depth) tornado-6.1.0/demos/tcpecho/000077500000000000000000000000001374705040500157255ustar00rootroot00000000000000tornado-6.1.0/demos/tcpecho/README.md000066400000000000000000000011341374705040500172030ustar00rootroot00000000000000TCP echo demo ============= This demo shows how to use Tornado's asynchronous TCP client and server by implementing `handle_stream` as a coroutine. To run the server: ``` $ python server.py ``` The client will send the message given with the `--message` option (which defaults to "ping"), wait for a response, then quit. To run: ``` $ python client.py --message="your message here" ``` Alternatively, you can interactively send messages to the echo server with a telnet client. For example: ``` $ telnet localhost 9888 Trying ::1... Connected to localhost. Escape character is '^]'. ping ping ``` tornado-6.1.0/demos/tcpecho/client.py000077500000000000000000000013701374705040500175610ustar00rootroot00000000000000#!/usr/bin/env python from tornado.ioloop import IOLoop from tornado import gen from tornado.tcpclient import TCPClient from tornado.options import options, define define("host", default="localhost", help="TCP server host") define("port", default=9888, help="TCP port to connect to") define("message", default="ping", help="Message to send") @gen.coroutine def send_message(): stream = yield TCPClient().connect(options.host, options.port) yield stream.write((options.message + "\n").encode()) print("Sent to server:", options.message) reply = yield stream.read_until(b"\n") print("Response from server:", reply.decode().strip()) if __name__ == "__main__": options.parse_command_line() IOLoop.current().run_sync(send_message) tornado-6.1.0/demos/tcpecho/server.py000077500000000000000000000021011374705040500176020ustar00rootroot00000000000000#!/usr/bin/env python import logging from tornado.ioloop import IOLoop from tornado import gen from tornado.iostream import StreamClosedError from tornado.tcpserver import TCPServer from tornado.options import options, define define("port", default=9888, help="TCP port to listen on") logger = logging.getLogger(__name__) class EchoServer(TCPServer): @gen.coroutine def handle_stream(self, stream, address): while True: try: data = yield stream.read_until(b"\n") logger.info("Received bytes: %s", data) if not data.endswith(b"\n"): data = data + b"\n" yield stream.write(data) except StreamClosedError: logger.warning("Lost client at host %s", address[0]) break except Exception as e: print(e) if __name__ == "__main__": options.parse_command_line() server = EchoServer() server.listen(options.port) logger.info("Listening on TCP port %d", options.port) IOLoop.current().start() tornado-6.1.0/demos/twitter/000077500000000000000000000000001374705040500160025ustar00rootroot00000000000000tornado-6.1.0/demos/twitter/home.html000066400000000000000000000003601374705040500176170ustar00rootroot00000000000000 Tornado Twitter Demo
  <ul>
    {% for tweet in timeline %}
    <li>{{ tweet['user']['screen_name'] }}: {{ tweet['text'] }}</li>
    {% end %}
  </ul>
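The template above reads just two fields from each status object. As a rough illustration (field names come from the template itself; the real Twitter API response carries many more fields), the ``timeline`` value the handler passes to ``render()`` is shaped like:

# Hypothetical minimal timeline data, just enough for home.html to render.
timeline = [
    {"user": {"screen_name": "alice"}, "text": "first tweet"},
    {"user": {"screen_name": "bob"}, "text": "second tweet"},
]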
tornado-6.1.0/demos/twitter/twitterdemo.py000077500000000000000000000066211374705040500207330ustar00rootroot00000000000000#!/usr/bin/env python """A simplistic Twitter viewer to demonstrate the use of TwitterMixin. To run this app, you must first register an application with Twitter: 1) Go to https://dev.twitter.com/apps and create an application. Your application must have a callback URL registered with Twitter. It doesn't matter what it is, but it has to be there (Twitter won't let you use localhost in a registered callback URL, but that won't stop you from running this demo on localhost). 2) Create a file called "secrets.cfg" and put your consumer key and secret (which Twitter gives you when you register an app) in it: twitter_consumer_key = 'asdf1234' twitter_consumer_secret = 'qwer5678' (you could also generate a random value for "cookie_secret" and put it in the same file, although it's not necessary to run this demo) 3) Run this program and go to http://localhost:8888 (by default) in your browser. """ import logging from tornado.auth import TwitterMixin from tornado.escape import json_decode, json_encode from tornado.ioloop import IOLoop from tornado import gen from tornado.options import define, options, parse_command_line, parse_config_file from tornado.web import Application, RequestHandler, authenticated define("port", default=8888, help="port to listen on") define( "config_file", default="secrets.cfg", help="filename for additional configuration" ) define( "debug", default=False, group="application", help="run in debug mode (with automatic reloading)", ) # The following settings should probably be defined in secrets.cfg define("twitter_consumer_key", type=str, group="application") define("twitter_consumer_secret", type=str, group="application") define( "cookie_secret", type=str, group="application", default="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE__", help="signing key for secure cookies", ) class BaseHandler(RequestHandler): COOKIE_NAME = "twitterdemo_user" def get_current_user(self): user_json = self.get_secure_cookie(self.COOKIE_NAME) if not user_json: return None return json_decode(user_json) class MainHandler(BaseHandler, TwitterMixin): @authenticated @gen.coroutine def get(self): timeline = yield self.twitter_request( "/statuses/home_timeline", access_token=self.current_user["access_token"] ) self.render("home.html", timeline=timeline) class LoginHandler(BaseHandler, TwitterMixin): @gen.coroutine def get(self): if self.get_argument("oauth_token", None): user = yield self.get_authenticated_user() del user["description"] self.set_secure_cookie(self.COOKIE_NAME, json_encode(user)) self.redirect(self.get_argument("next", "/")) else: yield self.authorize_redirect(callback_uri=self.request.full_url()) class LogoutHandler(BaseHandler): def get(self): self.clear_cookie(self.COOKIE_NAME) def main(): parse_command_line(final=False) parse_config_file(options.config_file) app = Application( [("/", MainHandler), ("/login", LoginHandler), ("/logout", LogoutHandler)], login_url="/login", **options.group_dict("application") ) app.listen(options.port) logging.info("Listening on http://localhost:%d" % options.port) IOLoop.current().start() if __name__ == "__main__": main() tornado-6.1.0/demos/websocket/000077500000000000000000000000001374705040500162665ustar00rootroot00000000000000tornado-6.1.0/demos/websocket/chatdemo.py000077500000000000000000000060161374705040500204320ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simplified chat demo for websockets. Authentication, error handling, etc are left as an exercise for the reader :) """ import logging import tornado.escape import tornado.ioloop import tornado.options import tornado.web import tornado.websocket import os.path import uuid from tornado.options import define, options define("port", default=8888, help="run on the given port", type=int) class Application(tornado.web.Application): def __init__(self): handlers = [(r"/", MainHandler), (r"/chatsocket", ChatSocketHandler)] settings = dict( cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), xsrf_cookies=True, ) super().__init__(handlers, **settings) class MainHandler(tornado.web.RequestHandler): def get(self): self.render("index.html", messages=ChatSocketHandler.cache) class ChatSocketHandler(tornado.websocket.WebSocketHandler): waiters = set() cache = [] cache_size = 200 def get_compression_options(self): # Non-None enables compression with default options. return {} def open(self): ChatSocketHandler.waiters.add(self) def on_close(self): ChatSocketHandler.waiters.remove(self) @classmethod def update_cache(cls, chat): cls.cache.append(chat) if len(cls.cache) > cls.cache_size: cls.cache = cls.cache[-cls.cache_size :] @classmethod def send_updates(cls, chat): logging.info("sending message to %d waiters", len(cls.waiters)) for waiter in cls.waiters: try: waiter.write_message(chat) except: logging.error("Error sending message", exc_info=True) def on_message(self, message): logging.info("got message %r", message) parsed = tornado.escape.json_decode(message) chat = {"id": str(uuid.uuid4()), "body": parsed["body"]} chat["html"] = tornado.escape.to_basestring( self.render_string("message.html", message=chat) ) ChatSocketHandler.update_cache(chat) ChatSocketHandler.send_updates(chat) def main(): tornado.options.parse_command_line() app = Application() app.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-6.1.0/demos/websocket/static/000077500000000000000000000000001374705040500175555ustar00rootroot00000000000000tornado-6.1.0/demos/websocket/static/chat.css000066400000000000000000000017351374705040500212140ustar00rootroot00000000000000/* * Copyright 2009 FriendFeed * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. 
*/ body { background: white; margin: 10px; } body, input { font-family: sans-serif; font-size: 10pt; color: black; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } #body { position: absolute; bottom: 10px; left: 10px; } #input { margin-top: 0.5em; } #inbox .message { padding-top: 0.25em; } #nav { float: right; z-index: 99; } tornado-6.1.0/demos/websocket/static/chat.js000066400000000000000000000037351374705040500210420ustar00rootroot00000000000000// Copyright 2009 FriendFeed // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. $(document).ready(function() { if (!window.console) window.console = {}; if (!window.console.log) window.console.log = function() {}; $("#messageform").on("submit", function() { newMessage($(this)); return false; }); $("#messageform").on("keypress", function(e) { if (e.keyCode == 13) { newMessage($(this)); return false; } }); $("#message").select(); updater.start(); }); function newMessage(form) { var message = form.formToDict(); updater.socket.send(JSON.stringify(message)); form.find("input[type=text]").val("").select(); } jQuery.fn.formToDict = function() { var fields = this.serializeArray(); var json = {} for (var i = 0; i < fields.length; i++) { json[fields[i].name] = fields[i].value; } if (json.next) delete json.next; return json; }; var updater = { socket: null, start: function() { var url = "ws://" + location.host + "/chatsocket"; updater.socket = new WebSocket(url); updater.socket.onmessage = function(event) { updater.showMessage(JSON.parse(event.data)); } }, showMessage: function(message) { var existing = $("#m" + message.id); if (existing.length > 0) return; var node = $(message.html); node.hide(); $("#inbox").append(node); node.slideDown(); } }; tornado-6.1.0/demos/websocket/templates/000077500000000000000000000000001374705040500202645ustar00rootroot00000000000000tornado-6.1.0/demos/websocket/templates/index.html000066400000000000000000000021141374705040500222570ustar00rootroot00000000000000 Tornado Chat Demo
{% for message in messages %} {% include "message.html" %} {% end %}
{% module xsrf_form_html() %}
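The ``{% module xsrf_form_html() %}`` line above emits a hidden ``_xsrf`` input; Tornado enforces the matching check because the application enables it, as chatdemo.py does. A minimal sketch of that pairing (the handler list and secret are placeholders):

import tornado.web

app = tornado.web.Application(
    handlers=[],  # placeholder; chatdemo.py registers its real handlers
    xsrf_cookies=True,  # makes Tornado reject POSTs without a valid _xsrf field
    cookie_secret="change-me",  # placeholder; generate your own random value
)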
tornado-6.1.0/demos/websocket/templates/message.html000066400000000000000000000001331374705040500225730ustar00rootroot00000000000000
{% module linkify(message["body"]) %}
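For reference, `tornado.escape.linkify` (called by the module above) HTML-escapes its input and wraps anything that looks like a URL in an anchor tag, so untrusted chat text is safe to splice into the page. Roughly:

from tornado.escape import linkify

# Escapes the text and turns the URL into a link; output shown approximately.
print(linkify("see http://www.tornadoweb.org & enjoy"))
# -> 'see <a href="http://www.tornadoweb.org">http://www.tornadoweb.org</a> &amp; enjoy'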
tornado-6.1.0/demos/webspider/000077500000000000000000000000001374705040500162645ustar00rootroot00000000000000tornado-6.1.0/demos/webspider/webspider.py000077500000000000000000000052631374705040500206310ustar00rootroot00000000000000#!/usr/bin/env python3 import time from datetime import timedelta from html.parser import HTMLParser from urllib.parse import urljoin, urldefrag from tornado import gen, httpclient, ioloop, queues base_url = "http://www.tornadoweb.org/en/stable/" concurrency = 10 async def get_links_from_url(url): """Download the page at `url` and parse it for links. Returned links have had the fragment after `#` removed, and have been made absolute so, e.g. the URL 'gen.html#tornado.gen.coroutine' becomes 'http://www.tornadoweb.org/en/stable/gen.html'. """ response = await httpclient.AsyncHTTPClient().fetch(url) print("fetched %s" % url) html = response.body.decode(errors="ignore") return [urljoin(url, remove_fragment(new_url)) for new_url in get_links(html)] def remove_fragment(url): pure_url, frag = urldefrag(url) return pure_url def get_links(html): class URLSeeker(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.urls = [] def handle_starttag(self, tag, attrs): href = dict(attrs).get("href") if href and tag == "a": self.urls.append(href) url_seeker = URLSeeker() url_seeker.feed(html) return url_seeker.urls async def main(): q = queues.Queue() start = time.time() fetching, fetched, dead = set(), set(), set() async def fetch_url(current_url): if current_url in fetching: return print("fetching %s" % current_url) fetching.add(current_url) urls = await get_links_from_url(current_url) fetched.add(current_url) for new_url in urls: # Only follow links beneath the base URL if new_url.startswith(base_url): await q.put(new_url) async def worker(): async for url in q: if url is None: return try: await fetch_url(url) except Exception as e: print("Exception: %s %s" % (e, url)) dead.add(url) finally: q.task_done() await q.put(base_url) # Start workers, then wait for the work queue to be empty. workers = gen.multi([worker() for _ in range(concurrency)]) await q.join(timeout=timedelta(seconds=300)) assert fetching == (fetched | dead) print("Done in %d seconds, fetched %s URLs." % (time.time() - start, len(fetched))) print("Unable to fetch %s URLs." % len(dead)) # Signal all the workers to exit. for _ in range(concurrency): await q.put(None) await workers if __name__ == "__main__": io_loop = ioloop.IOLoop.current() io_loop.run_sync(main) tornado-6.1.0/docs/000077500000000000000000000000001374705040500141215ustar00rootroot00000000000000tornado-6.1.0/docs/Makefile000066400000000000000000000014541374705040500155650ustar00rootroot00000000000000.PHONY: all all: sphinx # No -W for doctests because that disallows tests with empty output. SPHINX_DOCTEST_OPTS=-n -d build/doctrees . SPHINXOPTS=-n -W -d build/doctrees . .PHONY: sphinx sphinx: sphinx-build -b html $(SPHINXOPTS) build/html .PHONY: coverage coverage: sphinx-build -b coverage ${SPHINXOPTS} build/coverage cat build/coverage/python.txt .PHONY: latex latex: sphinx-build -b latex $(SPHINXOPTS) build/latex # Building a pdf requires a latex installation. For macports, the needed # packages are texlive-latex-extra and texlive-fonts-recommended.
# The output is in build/latex/tornado.pdf .PHONY: pdf pdf: latex cd build/latex && pdflatex -interaction=nonstopmode tornado.tex .PHONY: doctest doctest: sphinx-build -b doctest $(SPHINX_DOCTEST_OPTS) build/doctest clean: rm -rf build tornado-6.1.0/docs/asyncio.rst000066400000000000000000000003071374705040500163200ustar00rootroot00000000000000``tornado.platform.asyncio`` --- Bridge between ``asyncio`` and Tornado ======================================================================= .. automodule:: tornado.platform.asyncio :members: tornado-6.1.0/docs/auth.rst000066400000000000000000000022171374705040500156160ustar00rootroot00000000000000``tornado.auth`` --- Third-party login with OpenID and OAuth ============================================================ .. testsetup:: import tornado.auth, tornado.gen, tornado.web .. automodule:: tornado.auth Common protocols ---------------- These classes implement the OpenID and OAuth standards. They will generally need to be subclassed to use them with any particular site. The degree of customization required will vary, but in most cases overriding the class attributes (which are named beginning with underscores for historical reasons) should be sufficient. .. autoclass:: OpenIdMixin :members: .. autoclass:: OAuthMixin .. automethod:: authorize_redirect .. automethod:: get_authenticated_user .. automethod:: _oauth_consumer_token .. automethod:: _oauth_get_user_future .. automethod:: get_auth_http_client .. autoclass:: OAuth2Mixin :members: Google ------ .. autoclass:: GoogleOAuth2Mixin :members: Facebook -------- .. autoclass:: FacebookGraphMixin :members: Twitter ------- .. autoclass:: TwitterMixin :members: tornado-6.1.0/docs/autoreload.rst000066400000000000000000000003111374705040500170050ustar00rootroot00000000000000``tornado.autoreload`` --- Automatically detect code changes in development =========================================================================== .. automodule:: tornado.autoreload :members: tornado-6.1.0/docs/caresresolver.rst000066400000000000000000000014651374705040500175400ustar00rootroot00000000000000``tornado.platform.caresresolver`` --- Asynchronous DNS Resolver using C-Ares ============================================================================= .. module:: tornado.platform.caresresolver This module contains a DNS resolver using the c-ares library (and its wrapper ``pycares``). .. py:class:: CaresResolver Name resolver based on the c-ares library. This is a non-blocking and non-threaded resolver. It may not produce the same results as the system resolver, but can be used for non-blocking resolution when threads cannot be used. c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is the default for ``tornado.simple_httpclient``, but other libraries may default to ``AF_UNSPEC``. tornado-6.1.0/docs/concurrent.rst000066400000000000000000000022661374705040500170430ustar00rootroot00000000000000``tornado.concurrent`` --- Work with ``Future`` objects ======================================================= .. testsetup:: from tornado.concurrent import * from tornado import gen .. automodule:: tornado.concurrent :members: .. class:: Future ``tornado.concurrent.Future`` is an alias for `asyncio.Future`. In Tornado, the main way in which applications interact with ``Future`` objects is by ``awaiting`` or ``yielding`` them in coroutines, instead of calling methods on the ``Future`` objects themselves. 
For more information on the available methods, see the `asyncio.Future` docs. .. versionchanged:: 5.0 Tornado's implementation of ``Future`` has been replaced by the version from `asyncio` when available. - ``Future`` objects can only be created while there is a current `.IOLoop` - The timing of callbacks scheduled with ``Future.add_done_callback`` has changed. - Cancellation is now partially supported (only on Python 3) - The ``exc_info`` and ``set_exc_info`` methods are no longer available on Python 3. tornado-6.1.0/docs/conf.py000066400000000000000000000044261374705040500154260ustar00rootroot00000000000000# Ensure we get the local copy of tornado instead of what's on the standard path import os import sys sys.path.insert(0, os.path.abspath("..")) import tornado master_doc = "index" project = "Tornado" copyright = "The Tornado Authors" version = release = tornado.version extensions = [ "sphinx.ext.autodoc", "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinxcontrib.asyncio", ] primary_domain = "py" default_role = "py:obj" autodoc_member_order = "bysource" autoclass_content = "both" autodoc_inherit_docstrings = False # Without this line sphinx includes a copy of object.__init__'s docstring # on any class that doesn't define __init__. # https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__ autodoc_docstring_signature = False coverage_skip_undoc_in_source = True coverage_ignore_modules = [ "tornado.platform.asyncio", "tornado.platform.caresresolver", "tornado.platform.twisted", "tornado.simple_httpclient", ] # I wish this could go in a per-module file... coverage_ignore_classes = [ # tornado.gen "Runner", # tornado.web "ChunkedTransferEncoding", "GZipContentEncoding", "OutputTransform", "TemplateModule", "url", # tornado.websocket "WebSocketProtocol", "WebSocketProtocol13", "WebSocketProtocol76", ] coverage_ignore_functions = [ # various modules "doctests", "main", # tornado.escape # parse_qs_bytes should probably be documented but it's complicated by # having different implementations between py2 and py3. "parse_qs_bytes", # tornado.gen "Multi", ] html_favicon = "favicon.ico" latex_documents = [ ( "index", "tornado.tex", "Tornado Documentation", "The Tornado Authors", "manual", False, ) ] intersphinx_mapping = {"python": ("https://docs.python.org/3.6/", None)} on_rtd = os.environ.get("READTHEDOCS", None) == "True" # On RTD we can't import sphinx_rtd_theme, but it will be applied by # default anyway. This block will use the same theme when building locally # as on RTD. if not on_rtd: import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] tornado-6.1.0/docs/coroutine.rst000066400000000000000000000001521374705040500166600ustar00rootroot00000000000000Coroutines and concurrency ========================== .. toctree:: gen locks queues process tornado-6.1.0/docs/escape.rst000066400000000000000000000016201374705040500161120ustar00rootroot00000000000000``tornado.escape`` --- Escaping and string manipulation ======================================================= .. automodule:: tornado.escape Escaping functions ------------------ .. autofunction:: xhtml_escape .. autofunction:: xhtml_unescape .. autofunction:: url_escape .. autofunction:: url_unescape .. autofunction:: json_encode .. autofunction:: json_decode Byte/unicode conversions ------------------------ .. autofunction:: utf8 .. autofunction:: to_unicode .. function:: native_str .. 
function:: to_basestring Converts a byte or unicode string into type `str`. These functions were used to help transition from Python 2 to Python 3 but are now deprecated aliases for `to_unicode`. .. autofunction:: recursive_unicode Miscellaneous functions ----------------------- .. autofunction:: linkify .. autofunction:: squeeze tornado-6.1.0/docs/faq.rst000066400000000000000000000073071374705040500154310ustar00rootroot00000000000000Frequently Asked Questions ========================== .. contents:: :local: Why isn't this example with ``time.sleep()`` running in parallel? ----------------------------------------------------------------- Many people's first foray into Tornado's concurrency looks something like this:: class BadExampleHandler(RequestHandler): def get(self): for i in range(5): print(i) time.sleep(1) Fetch this handler twice at the same time and you'll see that the second five-second countdown doesn't start until the first one has completely finished. The reason for this is that `time.sleep` is a **blocking** function: it doesn't allow control to return to the `.IOLoop` so that other handlers can be run. Of course, `time.sleep` is really just a placeholder in these examples, the point is to show what happens when something in a handler gets slow. No matter what the real code is doing, to achieve concurrency blocking code must be replaced with non-blocking equivalents. This means one of three things: 1. *Find a coroutine-friendly equivalent.* For `time.sleep`, use `tornado.gen.sleep` (or `asyncio.sleep`) instead:: class CoroutineSleepHandler(RequestHandler): async def get(self): for i in range(5): print(i) await gen.sleep(1) When this option is available, it is usually the best approach. See the `Tornado wiki `_ for links to asynchronous libraries that may be useful. 2. *Find a callback-based equivalent.* Similar to the first option, callback-based libraries are available for many tasks, although they are slightly more complicated to use than a library designed for coroutines. Adapt the callback-based function into a future:: class CoroutineTimeoutHandler(RequestHandler): async def get(self): io_loop = IOLoop.current() for i in range(5): print(i) f = tornado.concurrent.Future() do_something_with_callback(f.set_result) result = await f Again, the `Tornado wiki `_ can be useful to find suitable libraries. 3. *Run the blocking code on another thread.* When asynchronous libraries are not available, `concurrent.futures.ThreadPoolExecutor` can be used to run any blocking code on another thread. This is a universal solution that can be used for any blocking function whether an asynchronous counterpart exists or not:: class ThreadPoolHandler(RequestHandler): async def get(self): for i in range(5): print(i) await IOLoop.current().run_in_executor(None, time.sleep, 1) See the :doc:`Asynchronous I/O ` chapter of the Tornado user's guide for more on blocking and asynchronous functions. My code is asynchronous. Why is it not running in parallel in two browser tabs? ------------------------------------------------------------------------------- Even when a handler is asynchronous and non-blocking, it can be surprisingly tricky to verify this. Browsers will recognize that you are trying to load the same page in two different tabs and delay the second request until the first has finished. To work around this and see that the server is in fact working in parallel, do one of two things: * Add something to your urls to make them unique. 
Instead of ``http://localhost:8888`` in both tabs, load ``http://localhost:8888/?x=1`` in one and ``http://localhost:8888/?x=2`` in the other. * Use two different browsers. For example, Firefox will be able to load a url even while that same url is being loaded in a Chrome tab. tornado-6.1.0/docs/favicon.ico000066400000000000000000000017761374705040500162520ustar00rootroot00000000000000tornado-6.1.0/docs/gen.rst000066400000000000000000000020271374705040500154250ustar00rootroot00000000000000``tornado.gen`` --- Generator-based coroutines ============================================== .. testsetup:: from tornado.web import * from tornado import gen .. automodule:: tornado.gen Decorators ---------- .. autofunction:: coroutine .. autoexception:: Return Utility functions ----------------- .. autofunction:: with_timeout(timeout: Union[float, datetime.timedelta], future: Yieldable, quiet_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = ()) .. autofunction:: sleep .. autoclass:: WaitIterator :members: .. autofunction:: multi(Union[List[Yieldable], Dict[Any, Yieldable]], quiet_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = ()) .. autofunction:: multi_future(Union[List[Yieldable], Dict[Any, Yieldable]], quiet_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = ()) .. autofunction:: convert_yielded .. autofunction:: maybe_future .. autofunction:: is_coroutine_function .. autodata:: moment :annotation: tornado-6.1.0/docs/guide.rst000066400000000000000000000002641374705040500157520ustar00rootroot00000000000000User's guide ============ .. toctree:: guide/intro guide/async guide/coroutines guide/queues guide/structure guide/templates guide/security guide/running tornado-6.1.0/docs/guide/000077500000000000000000000000001374705040500152165ustar00rootroot00000000000000tornado-6.1.0/docs/guide/async.rst000066400000000000000000000110501374705040500170620ustar00rootroot00000000000000Asynchronous and non-Blocking I/O --------------------------------- Real-time web features require a long-lived mostly-idle connection per user. In a traditional synchronous web server, this implies devoting one thread to each user, which can be very expensive. To minimize the cost of concurrent connections, Tornado uses a single-threaded event loop. This means that all application code should aim to be asynchronous and non-blocking because only one operation can be active at a time. The terms asynchronous and non-blocking are closely related and are often used interchangeably, but they are not quite the same thing. Blocking ~~~~~~~~ A function **blocks** when it waits for something to happen before returning. A function may block for many reasons: network I/O, disk I/O, mutexes, etc.
In fact, *every* function blocks, at least a little bit, while it is running and using the CPU (for an extreme example that demonstrates why CPU blocking must be taken as seriously as other kinds of blocking, consider password hashing functions like `bcrypt `_, which by design use hundreds of milliseconds of CPU time, far more than a typical network or disk access). A function can be blocking in some respects and non-blocking in others. In the context of Tornado we generally talk about blocking in the context of network I/O, although all kinds of blocking are to be minimized. Asynchronous ~~~~~~~~~~~~ An **asynchronous** function returns before it is finished, and generally causes some work to happen in the background before triggering some future action in the application (as opposed to normal **synchronous** functions, which do everything they are going to do before returning). There are many styles of asynchronous interfaces: * Callback argument * Return a placeholder (`.Future`, ``Promise``, ``Deferred``) * Deliver to a queue * Callback registry (e.g. POSIX signals) Regardless of which type of interface is used, asynchronous functions *by definition* interact differently with their callers; there is no free way to make a synchronous function asynchronous in a way that is transparent to its callers (systems like `gevent `_ use lightweight threads to offer performance comparable to asynchronous systems, but they do not actually make things asynchronous). Asynchronous operations in Tornado generally return placeholder objects (``Futures``), with the exception of some low-level components like the `.IOLoop` that use callbacks. ``Futures`` are usually transformed into their result with the ``await`` or ``yield`` keywords. Examples ~~~~~~~~ Here is a sample synchronous function: .. testcode:: from tornado.httpclient import HTTPClient def synchronous_fetch(url): http_client = HTTPClient() response = http_client.fetch(url) return response.body .. testoutput:: :hide: And here is the same function rewritten asynchronously as a native coroutine: .. testcode:: from tornado.httpclient import AsyncHTTPClient async def asynchronous_fetch(url): http_client = AsyncHTTPClient() response = await http_client.fetch(url) return response.body .. testoutput:: :hide: Or for compatibility with older versions of Python, using the `tornado.gen` module: .. testcode:: from tornado.httpclient import AsyncHTTPClient from tornado import gen @gen.coroutine def async_fetch_gen(url): http_client = AsyncHTTPClient() response = yield http_client.fetch(url) raise gen.Return(response.body) Coroutines are a little magical, but what they do internally is something like this: .. testcode:: from tornado.concurrent import Future def async_fetch_manual(url): http_client = AsyncHTTPClient() my_future = Future() fetch_future = http_client.fetch(url) def on_fetch(f): my_future.set_result(f.result().body) fetch_future.add_done_callback(on_fetch) return my_future .. testoutput:: :hide: Notice that the coroutine returns its `.Future` before the fetch is done. This is what makes coroutines *asynchronous*. Anything you can do with coroutines you can also do by passing callback objects around, but coroutines provide an important simplification by letting you organize your code in the same way you would if it were synchronous. This is especially important for error handling, since ``try``/``except`` blocks work as you would expect in coroutines while this is difficult to achieve with callbacks. 
Coroutines will be discussed in depth in the next section of this guide. tornado-6.1.0/docs/guide/coroutines.rst000066400000000000000000000254051374705040500201500ustar00rootroot00000000000000Coroutines ========== .. testsetup:: from tornado import gen **Coroutines** are the recommended way to write asynchronous code in Tornado. Coroutines use the Python ``await`` or ``yield`` keyword to suspend and resume execution instead of a chain of callbacks (cooperative lightweight threads as seen in frameworks like `gevent `_ are sometimes called coroutines as well, but in Tornado all coroutines use explicit context switches and are called as asynchronous functions). Coroutines are almost as simple as synchronous code, but without the expense of a thread. They also `make concurrency easier `_ to reason about by reducing the number of places where a context switch can happen. Example:: async def fetch_coroutine(url): http_client = AsyncHTTPClient() response = await http_client.fetch(url) return response.body .. _native_coroutines: Native vs decorated coroutines ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Python 3.5 introduced the ``async`` and ``await`` keywords (functions using these keywords are also called "native coroutines"). For compatibility with older versions of Python, you can use "decorated" or "yield-based" coroutines using the `tornado.gen.coroutine` decorator. Native coroutines are the recommended form whenever possible. Only use decorated coroutines when compatibility with older versions of Python is required. Examples in the Tornado documentation will generally use the native form. Translation between the two forms is generally straightforward:: # Decorated: # Native: # Normal function declaration # with decorator # "async def" keywords @gen.coroutine def a(): async def a(): # "yield" all async funcs # "await" all async funcs b = yield c() b = await c() # "return" and "yield" # cannot be mixed in # Python 2, so raise a # special exception. # Return normally raise gen.Return(b) return b Other differences between the two forms of coroutine are outlined below. - Native coroutines: - are generally faster. - can use ``async for`` and ``async with`` statements which make some patterns much simpler. - do not run at all unless you ``await`` or ``yield`` them. Decorated coroutines can start running "in the background" as soon as they are called. Note that for both kinds of coroutines it is important to use ``await`` or ``yield`` so that any exceptions have somewhere to go. - Decorated coroutines: - have additional integration with the `concurrent.futures` package, allowing the result of ``executor.submit`` to be yielded directly. For native coroutines, use `.IOLoop.run_in_executor` instead. - support some shorthand for waiting on multiple objects by yielding a list or dict. Use `tornado.gen.multi` to do this in native coroutines. - can support integration with other packages including Twisted via a registry of conversion functions. To access this functionality in native coroutines, use `tornado.gen.convert_yielded`. - always return a `.Future` object. Native coroutines return an *awaitable* object that is not a `.Future`. In Tornado the two are mostly interchangeable. How it works ~~~~~~~~~~~~ This section explains the operation of decorated coroutines. Native coroutines are conceptually similar, but a little more complicated because of the extra integration with the Python runtime. A function containing ``yield`` is a **generator**. 
All generators are asynchronous; when called they return a generator object instead of running to completion. The ``@gen.coroutine`` decorator communicates with the generator via the ``yield`` expressions, and with the coroutine's caller by returning a `.Future`. Here is a simplified version of the coroutine decorator's inner loop:: # Simplified inner loop of tornado.gen.Runner def run(self): # send(x) makes the current yield return x. # It returns when the next yield is reached future = self.gen.send(self.next) def callback(f): self.next = f.result() self.run() future.add_done_callback(callback) The decorator receives a `.Future` from the generator, waits (without blocking) for that `.Future` to complete, then "unwraps" the `.Future` and sends the result back into the generator as the result of the ``yield`` expression. Most asynchronous code never touches the `.Future` class directly except to immediately pass the `.Future` returned by an asynchronous function to a ``yield`` expression. How to call a coroutine ~~~~~~~~~~~~~~~~~~~~~~~ Coroutines do not raise exceptions in the normal way: any exception they raise will be trapped in the awaitable object until it is yielded. This means it is important to call coroutines in the right way, or you may have errors that go unnoticed:: async def divide(x, y): return x / y def bad_call(): # This should raise a ZeroDivisionError, but it won't because # the coroutine is called incorrectly. divide(1, 0) In nearly all cases, any function that calls a coroutine must be a coroutine itself, and use the ``await`` or ``yield`` keyword in the call. When you are overriding a method defined in a superclass, consult the documentation to see if coroutines are allowed (the documentation should say that the method "may be a coroutine" or "may return a `.Future`"):: async def good_call(): # await will unwrap the object returned by divide() and raise # the exception. await divide(1, 0) Sometimes you may want to "fire and forget" a coroutine without waiting for its result. In this case it is recommended to use `.IOLoop.spawn_callback`, which makes the `.IOLoop` responsible for the call. If it fails, the `.IOLoop` will log a stack trace:: # The IOLoop will catch the exception and print a stack trace in # the logs. Note that this doesn't look like a normal call, since # we pass the function object to be called by the IOLoop. IOLoop.current().spawn_callback(divide, 1, 0) Using `.IOLoop.spawn_callback` in this way is *recommended* for functions using ``@gen.coroutine``, but it is *required* for functions using ``async def`` (otherwise the coroutine runner will not start). Finally, at the top level of a program, *if the IOLoop is not yet running,* you can start the `.IOLoop`, run the coroutine, and then stop the `.IOLoop` with the `.IOLoop.run_sync` method. This is often used to start the ``main`` function of a batch-oriented program:: # run_sync() doesn't take arguments, so we must wrap the # call in a lambda. IOLoop.current().run_sync(lambda: divide(1, 0)) Coroutine patterns ~~~~~~~~~~~~~~~~~~ Calling blocking functions ^^^^^^^^^^^^^^^^^^^^^^^^^^ The simplest way to call a blocking function from a coroutine is to use `.IOLoop.run_in_executor`, which returns ``Futures`` that are compatible with coroutines:: async def call_blocking(): await IOLoop.current().run_in_executor(None, blocking_func, args) Parallelism ^^^^^^^^^^^ The `.multi` function accepts lists and dicts whose values are ``Futures``, and waits for all of those ``Futures`` in parallel: .. 
testcode:: from tornado.gen import multi async def parallel_fetch(url1, url2): resp1, resp2 = await multi([http_client.fetch(url1), http_client.fetch(url2)]) async def parallel_fetch_many(urls): responses = await multi([http_client.fetch(url) for url in urls]) # responses is a list of HTTPResponses in the same order async def parallel_fetch_dict(urls): responses = await multi({url: http_client.fetch(url) for url in urls}) # responses is a dict {url: HTTPResponse} .. testoutput:: :hide: In decorated coroutines, it is possible to ``yield`` the list or dict directly:: @gen.coroutine def parallel_fetch_decorated(url1, url2): resp1, resp2 = yield [http_client.fetch(url1), http_client.fetch(url2)] Interleaving ^^^^^^^^^^^^ Sometimes it is useful to save a `.Future` instead of yielding it immediately, so you can start another operation before waiting. .. testcode:: from tornado.gen import convert_yielded async def get(self): # convert_yielded() starts the native coroutine in the background. # This is equivalent to asyncio.ensure_future() (both work in Tornado). fetch_future = convert_yielded(self.fetch_next_chunk()) while True: chunk = await fetch_future if chunk is None: break self.write(chunk) fetch_future = convert_yielded(self.fetch_next_chunk()) await self.flush() .. testoutput:: :hide: This is a little easier to do with decorated coroutines, because they start immediately when called: .. testcode:: @gen.coroutine def get(self): fetch_future = self.fetch_next_chunk() while True: chunk = yield fetch_future if chunk is None: break self.write(chunk) fetch_future = self.fetch_next_chunk() yield self.flush() .. testoutput:: :hide: Looping ^^^^^^^ In native coroutines, ``async for`` can be used. In older versions of Python, looping is tricky with coroutines since there is no way to ``yield`` on every iteration of a ``for`` or ``while`` loop and capture the result of the yield. Instead, you'll need to separate the loop condition from accessing the results, as in this example from `Motor <https://motor.readthedocs.io/>`_:: import motor db = motor.MotorClient().test @gen.coroutine def loop_example(collection): cursor = db.collection.find() while (yield cursor.fetch_next): doc = cursor.next_object() Running in the background ^^^^^^^^^^^^^^^^^^^^^^^^^ `.PeriodicCallback` is not normally used with coroutines. Instead, a coroutine can contain a ``while True:`` loop and use `tornado.gen.sleep`:: async def minute_loop(): while True: await do_something() await gen.sleep(60) # Coroutines that loop forever are generally started with # spawn_callback(). IOLoop.current().spawn_callback(minute_loop) Sometimes a more complicated loop may be desirable. For example, the previous loop runs every ``60+N`` seconds, where ``N`` is the running time of ``do_something()``. To run exactly every 60 seconds, use the interleaving pattern from above:: async def minute_loop2(): while True: nxt = gen.sleep(60) # Start the clock. await do_something() # Run while the clock is ticking. await nxt # Wait for the timer to run out. tornado-6.1.0/docs/guide/intro.rst000066400000000000000000000033031374705040500171020ustar00rootroot00000000000000Introduction ------------ `Tornado <http://www.tornadoweb.org>`_ is a Python web framework and asynchronous networking library, originally developed at `FriendFeed <https://en.wikipedia.org/wiki/FriendFeed>`_. By using non-blocking network I/O, Tornado can scale to tens of thousands of open connections, making it ideal for `long polling <https://en.wikipedia.org/wiki/Push_technology#Long_polling>`_, `WebSockets <https://en.wikipedia.org/wiki/WebSocket>`_, and other applications that require a long-lived connection to each user.
Tornado can be roughly divided into four major components: * A web framework (including `.RequestHandler` which is subclassed to create web applications, and various supporting classes). * Client- and server-side implementations of HTTP (`.HTTPServer` and `.AsyncHTTPClient`). * An asynchronous networking library including the classes `.IOLoop` and `.IOStream`, which serve as the building blocks for the HTTP components and can also be used to implement other protocols. * A coroutine library (`tornado.gen`) which allows asynchronous code to be written in a more straightforward way than chaining callbacks. This is similar to the native coroutine feature introduced in Python 3.5 (``async def``). Native coroutines are recommended in place of the `tornado.gen` module when available. The Tornado web framework and HTTP server together offer a full-stack alternative to `WSGI <https://wsgi.readthedocs.io/en/latest/>`_. While it is possible to use the Tornado HTTP server as a container for other WSGI frameworks (`.WSGIContainer`), this combination has limitations and to take full advantage of Tornado you will need to use Tornado's web framework and HTTP server together. tornado-6.1.0/docs/guide/queues.rst000066400000000000000000000023571374705040500172660ustar00rootroot00000000000000:class:`~tornado.queues.Queue` example - a concurrent web spider ================================================================ .. currentmodule:: tornado.queues Tornado's `tornado.queues` module implements an asynchronous producer / consumer pattern for coroutines, analogous to the pattern implemented for threads by the Python standard library's `queue` module. A coroutine that yields `Queue.get` pauses until there is an item in the queue. If the queue has a maximum size set, a coroutine that yields `Queue.put` pauses until there is room for another item. A `~Queue` maintains a count of unfinished tasks, which begins at zero. `~Queue.put` increments the count; `~Queue.task_done` decrements it. In the web-spider example here, the queue begins containing only base_url. When a worker fetches a page it parses the links and puts new ones in the queue, then calls `~Queue.task_done` to decrement the counter once. Eventually, a worker fetches a page whose URLs have all been seen before, and there is also no work left in the queue. Thus that worker's call to `~Queue.task_done` decrements the counter to zero. The main coroutine, which is waiting for `~Queue.join`, is unpaused and finishes. .. literalinclude:: ../../demos/webspider/webspider.py tornado-6.1.0/docs/guide/running.rst000066400000000000000000000247041374705040500174370ustar00rootroot00000000000000Running and deploying ===================== Since Tornado supplies its own HTTPServer, running and deploying it is a little different from other Python web frameworks. Instead of configuring a WSGI container to find your application, you write a ``main()`` function that starts the server: .. testcode:: def main(): app = make_app() app.listen(8888) IOLoop.current().start() if __name__ == '__main__': main() .. testoutput:: :hide: Configure your operating system or process manager to run this program to start the server. Note that it may be necessary to increase the number of open files per process (to avoid "Too many open files" errors). To raise this limit (setting it to 50000 for example) you can use the ``ulimit`` command, modify ``/etc/security/limits.conf`` or set ``minfds`` in your `supervisord <http://supervisord.org>`_ config.
Processes and ports ~~~~~~~~~~~~~~~~~~~ Due to the Python GIL (Global Interpreter Lock), it is necessary to run multiple Python processes to take full advantage of multi-CPU machines. Typically it is best to run one process per CPU. Tornado includes a built-in multi-process mode to start several processes at once (note that multi-process mode does not work on Windows). This requires a slight alteration to the standard main function: .. testcode:: def main(): app = make_app() server = tornado.httpserver.HTTPServer(app) server.bind(8888) server.start(0) # forks one process per cpu IOLoop.current().start() .. testoutput:: :hide: This is the easiest way to start multiple processes and have them all share the same port, although it has some limitations. First, each child process will have its own ``IOLoop``, so it is important that nothing touches the global ``IOLoop`` instance (even indirectly) before the fork. Second, it is difficult to do zero-downtime updates in this model. Finally, since all the processes share the same port it is more difficult to monitor them individually. For more sophisticated deployments, it is recommended to start the processes independently, and have each one listen on a different port. The "process groups" feature of `supervisord `_ is one good way to arrange this. When each process uses a different port, an external load balancer such as HAProxy or nginx is usually needed to present a single address to outside visitors. Running behind a load balancer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When running behind a load balancer like `nginx `_, it is recommended to pass ``xheaders=True`` to the `.HTTPServer` constructor. This will tell Tornado to use headers like ``X-Real-IP`` to get the user's IP address instead of attributing all traffic to the balancer's IP address. This is a barebones nginx config file that is structurally similar to the one we use at FriendFeed. 
It assumes nginx and the Tornado servers are running on the same machine, and the four Tornado servers are running on ports 8000 - 8003:: user nginx; worker_processes 1; error_log /var/log/nginx/error.log; pid /var/run/nginx.pid; events { worker_connections 1024; use epoll; } http { # Enumerate all the Tornado servers here upstream frontends { server 127.0.0.1:8000; server 127.0.0.1:8001; server 127.0.0.1:8002; server 127.0.0.1:8003; } include /etc/nginx/mime.types; default_type application/octet-stream; access_log /var/log/nginx/access.log; keepalive_timeout 65; proxy_read_timeout 200; sendfile on; tcp_nopush on; tcp_nodelay on; gzip on; gzip_min_length 1000; gzip_proxied any; gzip_types text/plain text/html text/css text/xml application/x-javascript application/xml application/atom+xml text/javascript; # Only retry if there was a communication error, not a timeout # on the Tornado server (to avoid propagating "queries of death" # to all frontends) proxy_next_upstream error; server { listen 80; # Allow file uploads client_max_body_size 50M; location ^~ /static/ { root /var/www; if ($query_string) { expires max; } } location = /favicon.ico { rewrite (.*) /static/favicon.ico; } location = /robots.txt { rewrite (.*) /static/robots.txt; } location / { proxy_pass_header Server; proxy_set_header Host $http_host; proxy_redirect off; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Scheme $scheme; proxy_pass http://frontends; } } } Static files and aggressive file caching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can serve static files from Tornado by specifying the ``static_path`` setting in your application:: settings = { "static_path": os.path.join(os.path.dirname(__file__), "static"), "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", "login_url": "/login", "xsrf_cookies": True, } application = tornado.web.Application([ (r"/", MainHandler), (r"/login", LoginHandler), (r"/(apple-touch-icon\.png)", tornado.web.StaticFileHandler, dict(path=settings['static_path'])), ], **settings) This setting will automatically make all requests that start with ``/static/`` serve from that static directory, e.g. ``http://localhost:8888/static/foo.png`` will serve the file ``foo.png`` from the specified static directory. We also automatically serve ``/robots.txt`` and ``/favicon.ico`` from the static directory (even though they don't start with the ``/static/`` prefix). In the above settings, we have explicitly configured Tornado to serve ``apple-touch-icon.png`` from the root with the `.StaticFileHandler`, though it is physically in the static file directory. (The capturing group in that regular expression is necessary to tell `.StaticFileHandler` the requested filename; recall that capturing groups are passed to handlers as method arguments.) You could do the same thing to serve e.g. ``sitemap.xml`` from the site root. Of course, you can also avoid faking a root ``apple-touch-icon.png`` by using the appropriate ```` tag in your HTML. To improve performance, it is generally a good idea for browsers to cache static resources aggressively so browsers won't send unnecessary ``If-Modified-Since`` or ``Etag`` requests that might block the rendering of the page. Tornado supports this out of the box with *static content versioning*. To use this feature, use the `~.RequestHandler.static_url` method in your templates rather than typing the URL of the static file directly in your HTML:: FriendFeed - {{ _("Home") }}
The ``static_url()`` function will translate that relative path to a URI that looks like ``/static/images/logo.png?v=aae54``. The ``v`` argument is a hash of the content in ``logo.png``, and its presence makes the Tornado server send cache headers to the user's browser that will make the browser cache the content indefinitely. Since the ``v`` argument is based on the content of the file, if you update a file and restart your server, it will start sending a new ``v`` value, so the user's browser will automatically fetch the new file. If the file's contents don't change, the browser will continue to use a locally cached copy without ever checking for updates on the server, significantly improving rendering performance. In production, you probably want to serve static files from a more optimized static file server like `nginx `_. You can configure almost any web server to recognize the version tags used by ``static_url()`` and set caching headers accordingly. Here is the relevant portion of the nginx configuration we use at FriendFeed:: location /static/ { root /var/friendfeed/static; if ($query_string) { expires max; } } .. _debug-mode: Debug mode and automatic reloading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you pass ``debug=True`` to the ``Application`` constructor, the app will be run in debug/development mode. In this mode, several features intended for convenience while developing will be enabled (each of which is also available as an individual flag; if both are specified the individual flag takes precedence): * ``autoreload=True``: The app will watch for changes to its source files and reload itself when anything changes. This reduces the need to manually restart the server during development. However, certain failures (such as syntax errors at import time) can still take the server down in a way that debug mode cannot currently recover from. * ``compiled_template_cache=False``: Templates will not be cached. * ``static_hash_cache=False``: Static file hashes (used by the ``static_url`` function) will not be cached. * ``serve_traceback=True``: When an exception in a `.RequestHandler` is not caught, an error page including a stack trace will be generated. Autoreload mode is not compatible with the multi-process mode of `.HTTPServer`. You must not give `HTTPServer.start <.TCPServer.start>` an argument other than 1 (or call `tornado.process.fork_processes`) if you are using autoreload mode. The automatic reloading feature of debug mode is available as a standalone module in `tornado.autoreload`. The two can be used in combination to provide extra robustness against syntax errors: set ``autoreload=True`` within the app to detect changes while it is running, and start it with ``python -m tornado.autoreload myserver.py`` to catch any syntax errors or other errors at startup. Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) because it re-executes Python using `sys.executable` and `sys.argv`. Additionally, modifying these variables will cause reloading to behave incorrectly. On some platforms (including Windows and Mac OSX prior to 10.6), the process cannot be updated "in-place", so when a code change is detected the old server exits and a new one starts. This has been known to confuse some IDEs. tornado-6.1.0/docs/guide/security.rst000066400000000000000000000311021374705040500176140ustar00rootroot00000000000000Authentication and security =========================== .. 
testsetup:: import tornado.auth import tornado.web Cookies and secure cookies ~~~~~~~~~~~~~~~~~~~~~~~~~~ You can set cookies in the user's browser with the ``set_cookie`` method: .. testcode:: class MainHandler(tornado.web.RequestHandler): def get(self): if not self.get_cookie("mycookie"): self.set_cookie("mycookie", "myvalue") self.write("Your cookie was not set yet!") else: self.write("Your cookie was set!") .. testoutput:: :hide: Cookies are not secure and can easily be modified by clients. If you need to set cookies to, e.g., identify the currently logged in user, you need to sign your cookies to prevent forgery. Tornado supports signed cookies with the `~.RequestHandler.set_secure_cookie` and `~.RequestHandler.get_secure_cookie` methods. To use these methods, you need to specify a secret key named ``cookie_secret`` when you create your application. You can pass in application settings as keyword arguments to your application: .. testcode:: application = tornado.web.Application([ (r"/", MainHandler), ], cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__") .. testoutput:: :hide: Signed cookies contain the encoded value of the cookie in addition to a timestamp and an `HMAC `_ signature. If the cookie is old or if the signature doesn't match, ``get_secure_cookie`` will return ``None`` just as if the cookie isn't set. The secure version of the example above: .. testcode:: class MainHandler(tornado.web.RequestHandler): def get(self): if not self.get_secure_cookie("mycookie"): self.set_secure_cookie("mycookie", "myvalue") self.write("Your cookie was not set yet!") else: self.write("Your cookie was set!") .. testoutput:: :hide: Tornado's secure cookies guarantee integrity but not confidentiality. That is, the cookie cannot be modified but its contents can be seen by the user. The ``cookie_secret`` is a symmetric key and must be kept secret -- anyone who obtains the value of this key could produce their own signed cookies. By default, Tornado's secure cookies expire after 30 days. To change this, use the ``expires_days`` keyword argument to ``set_secure_cookie`` *and* the ``max_age_days`` argument to ``get_secure_cookie``. These two values are passed separately so that you may e.g. have a cookie that is valid for 30 days for most purposes, but for certain sensitive actions (such as changing billing information) you use a smaller ``max_age_days`` when reading the cookie. Tornado also supports multiple signing keys to enable signing key rotation. ``cookie_secret`` then must be a dict with integer key versions as keys and the corresponding secrets as values. The currently used signing key must then be set as ``key_version`` application setting but all other keys in the dict are allowed for cookie signature validation, if the correct key version is set in the cookie. To implement cookie updates, the current signing key version can be queried via `~.RequestHandler.get_secure_cookie_key_version`. .. _user-authentication: User authentication ~~~~~~~~~~~~~~~~~~~ The currently authenticated user is available in every request handler as `self.current_user <.RequestHandler.current_user>`, and in every template as ``current_user``. By default, ``current_user`` is ``None``. To implement user authentication in your application, you need to override the ``get_current_user()`` method in your request handlers to determine the current user based on, e.g., the value of a cookie. 
Here is an example that lets users log into the application simply by specifying a nickname, which is then saved in a cookie: .. testcode:: class BaseHandler(tornado.web.RequestHandler): def get_current_user(self): return self.get_secure_cookie("user") class MainHandler(BaseHandler): def get(self): if not self.current_user: self.redirect("/login") return name = tornado.escape.xhtml_escape(self.current_user) self.write("Hello, " + name) class LoginHandler(BaseHandler): def get(self): self.write('
' 'Name: ' '' '
') def post(self): self.set_secure_cookie("user", self.get_argument("name")) self.redirect("/") application = tornado.web.Application([ (r"/", MainHandler), (r"/login", LoginHandler), ], cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__") .. testoutput:: :hide: You can require that the user be logged in using the `Python decorator `_ `tornado.web.authenticated`. If a request goes to a method with this decorator, and the user is not logged in, they will be redirected to ``login_url`` (another application setting). The example above could be rewritten: .. testcode:: class MainHandler(BaseHandler): @tornado.web.authenticated def get(self): name = tornado.escape.xhtml_escape(self.current_user) self.write("Hello, " + name) settings = { "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", "login_url": "/login", } application = tornado.web.Application([ (r"/", MainHandler), (r"/login", LoginHandler), ], **settings) .. testoutput:: :hide: If you decorate ``post()`` methods with the ``authenticated`` decorator, and the user is not logged in, the server will send a ``403`` response. The ``@authenticated`` decorator is simply shorthand for ``if not self.current_user: self.redirect()`` and may not be appropriate for non-browser-based login schemes. Check out the `Tornado Blog example application `_ for a complete example that uses authentication (and stores user data in a PostgreSQL database). Third party authentication ~~~~~~~~~~~~~~~~~~~~~~~~~~ The `tornado.auth` module implements the authentication and authorization protocols for a number of the most popular sites on the web, including Google/Gmail, Facebook, Twitter, and FriendFeed. The module includes methods to log users in via these sites and, where applicable, methods to authorize access to the service so you can, e.g., download a user's address book or publish a Twitter message on their behalf. Here is an example handler that uses Google for authentication, saving the Google credentials in a cookie for later access: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): async def get(self): if self.get_argument('code', False): user = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) # Save the user with e.g. set_secure_cookie else: await self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: See the `tornado.auth` module documentation for more details. .. _xsrf: Cross-site request forgery protection ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Cross-site request forgery `_, or XSRF, is a common problem for personalized web applications. See the `Wikipedia article `_ for more information on how XSRF works. The generally accepted solution to prevent XSRF is to cookie every user with an unpredictable value and include that value as an additional argument with every form submission on your site. If the cookie and the value in the form submission do not match, then the request is likely forged. Tornado comes with built-in XSRF protection. To include it in your site, include the application setting ``xsrf_cookies``: .. 
testcode:: settings = { "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", "login_url": "/login", "xsrf_cookies": True, } application = tornado.web.Application([ (r"/", MainHandler), (r"/login", LoginHandler), ], **settings) .. testoutput:: :hide: If ``xsrf_cookies`` is set, the Tornado web application will set the ``_xsrf`` cookie for all users and reject all ``POST``, ``PUT``, and ``DELETE`` requests that do not contain a correct ``_xsrf`` value. If you turn this setting on, you need to instrument all forms that submit via ``POST`` to contain this field. You can do this with the special `.UIModule` ``xsrf_form_html()``, available in all templates::
{% module xsrf_form_html() %}
If you submit AJAX ``POST`` requests, you will also need to instrument your JavaScript to include the ``_xsrf`` value with each request. This is the `jQuery `_ function we use at FriendFeed for AJAX ``POST`` requests that automatically adds the ``_xsrf`` value to all requests:: function getCookie(name) { var r = document.cookie.match("\\b" + name + "=([^;]*)\\b"); return r ? r[1] : undefined; } jQuery.postJSON = function(url, args, callback) { args._xsrf = getCookie("_xsrf"); $.ajax({url: url, data: $.param(args), dataType: "text", type: "POST", success: function(response) { callback(eval("(" + response + ")")); }}); }; For ``PUT`` and ``DELETE`` requests (as well as ``POST`` requests that do not use form-encoded arguments), the XSRF token may also be passed via an HTTP header named ``X-XSRFToken``. The XSRF cookie is normally set when ``xsrf_form_html`` is used, but in a pure-JavaScript application that does not use any regular forms you may need to access ``self.xsrf_token`` manually (just reading the property is enough to set the cookie as a side effect). If you need to customize XSRF behavior on a per-handler basis, you can override `.RequestHandler.check_xsrf_cookie()`. For example, if you have an API whose authentication does not use cookies, you may want to disable XSRF protection by making ``check_xsrf_cookie()`` do nothing. However, if you support both cookie and non-cookie-based authentication, it is important that XSRF protection be used whenever the current request is authenticated with a cookie. .. _dnsrebinding: DNS Rebinding ~~~~~~~~~~~~~ `DNS rebinding `_ is an attack that can bypass the same-origin policy and allow external sites to access resources on private networks. This attack involves a DNS name (with a short TTL) that alternates between returning an IP address controlled by the attacker and one controlled by the victim (often a guessable private IP address such as ``127.0.0.1`` or ``192.168.1.1``). Applications that use TLS are *not* vulnerable to this attack (because the browser will display certificate mismatch warnings that block automated access to the target site). Applications that cannot use TLS and rely on network-level access controls (for example, assuming that a server on ``127.0.0.1`` can only be accessed by the local machine) should guard against DNS rebinding by validating the ``Host`` HTTP header. This means passing a restrictive hostname pattern to either a `.HostMatches` router or the first argument of `.Application.add_handlers`:: # BAD: uses a default host pattern of r'.*' app = Application([('/foo', FooHandler)]) # GOOD: only matches localhost or its ip address. app = Application() app.add_handlers(r'(localhost|127\.0\.0\.1)', [('/foo', FooHandler)]) # GOOD: same as previous example using tornado.routing. app = Application([ (HostMatches(r'(localhost|127\.0\.0\.1)'), [('/foo', FooHandler)]), ]) In addition, the ``default_host`` argument to `.Application` and the `.DefaultHostMatches` router must not be used in applications that may be vulnerable to DNS rebinding, because it has a similar effect to a wildcard host pattern. tornado-6.1.0/docs/guide/structure.rst000066400000000000000000000317631374705040500200220ustar00rootroot00000000000000.. currentmodule:: tornado.web .. 
testsetup:: import tornado.web Structure of a Tornado web application ====================================== A Tornado web application generally consists of one or more `.RequestHandler` subclasses, an `.Application` object which routes incoming requests to handlers, and a ``main()`` function to start the server. A minimal "hello world" example looks something like this: .. testcode:: import tornado.ioloop import tornado.web class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") def make_app(): return tornado.web.Application([ (r"/", MainHandler), ]) if __name__ == "__main__": app = make_app() app.listen(8888) tornado.ioloop.IOLoop.current().start() .. testoutput:: :hide: The ``Application`` object ~~~~~~~~~~~~~~~~~~~~~~~~~~ The `.Application` object is responsible for global configuration, including the routing table that maps requests to handlers. The routing table is a list of `.URLSpec` objects (or tuples), each of which contains (at least) a regular expression and a handler class. Order matters; the first matching rule is used. If the regular expression contains capturing groups, these groups are the *path arguments* and will be passed to the handler's HTTP method. If a dictionary is passed as the third element of the `.URLSpec`, it supplies the *initialization arguments* which will be passed to `.RequestHandler.initialize`. Finally, the `.URLSpec` may have a name, which will allow it to be used with `.RequestHandler.reverse_url`. For example, in this fragment the root URL ``/`` is mapped to ``MainHandler`` and URLs of the form ``/story/`` followed by a number are mapped to ``StoryHandler``. That number is passed (as a string) to ``StoryHandler.get``. :: class MainHandler(RequestHandler): def get(self): self.write('link to story 1' % self.reverse_url("story", "1")) class StoryHandler(RequestHandler): def initialize(self, db): self.db = db def get(self, story_id): self.write("this is story %s" % story_id) app = Application([ url(r"/", MainHandler), url(r"/story/([0-9]+)", StoryHandler, dict(db=db), name="story") ]) The `.Application` constructor takes many keyword arguments that can be used to customize the behavior of the application and enable optional features; see `.Application.settings` for the complete list. Subclassing ``RequestHandler`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most of the work of a Tornado web application is done in subclasses of `.RequestHandler`. The main entry point for a handler subclass is a method named after the HTTP method being handled: ``get()``, ``post()``, etc. Each handler may define one or more of these methods to handle different HTTP actions. As described above, these methods will be called with arguments corresponding to the capturing groups of the routing rule that matched. Within a handler, call methods such as `.RequestHandler.render` or `.RequestHandler.write` to produce a response. ``render()`` loads a `.Template` by name and renders it with the given arguments. ``write()`` is used for non-template-based output; it accepts strings, bytes, and dictionaries (dicts will be encoded as JSON). Many methods in `.RequestHandler` are designed to be overridden in subclasses and be used throughout the application. It is common to define a ``BaseHandler`` class that overrides methods such as `~.RequestHandler.write_error` and `~.RequestHandler.get_current_user` and then subclass your own ``BaseHandler`` instead of `.RequestHandler` for all your specific handlers. 
Handling request input ~~~~~~~~~~~~~~~~~~~~~~ The request handler can access the object representing the current request with ``self.request``. See the class definition for `~tornado.httputil.HTTPServerRequest` for a complete list of attributes. Request data in the formats used by HTML forms will be parsed for you and is made available in methods like `~.RequestHandler.get_query_argument` and `~.RequestHandler.get_body_argument`. .. testcode:: class MyFormHandler(tornado.web.RequestHandler): def get(self): self.write('
' '' '' '
') def post(self): self.set_header("Content-Type", "text/plain") self.write("You wrote " + self.get_body_argument("message")) .. testoutput:: :hide: Since the HTML form encoding is ambiguous as to whether an argument is a single value or a list with one element, `.RequestHandler` has distinct methods to allow the application to indicate whether or not it expects a list. For lists, use `~.RequestHandler.get_query_arguments` and `~.RequestHandler.get_body_arguments` instead of their singular counterparts. Files uploaded via a form are available in ``self.request.files``, which maps names (the name of the HTML ```` element) to a list of files. Each file is a dictionary of the form ``{"filename":..., "content_type":..., "body":...}``. The ``files`` object is only present if the files were uploaded with a form wrapper (i.e. a ``multipart/form-data`` Content-Type); if this format was not used the raw uploaded data is available in ``self.request.body``. By default uploaded files are fully buffered in memory; if you need to handle files that are too large to comfortably keep in memory see the `.stream_request_body` class decorator. In the demos directory, `file_receiver.py `_ shows both methods of receiving file uploads. Due to the quirks of the HTML form encoding (e.g. the ambiguity around singular versus plural arguments), Tornado does not attempt to unify form arguments with other types of input. In particular, we do not parse JSON request bodies. Applications that wish to use JSON instead of form-encoding may override `~.RequestHandler.prepare` to parse their requests:: def prepare(self): if self.request.headers.get("Content-Type", "").startswith("application/json"): self.json_args = json.loads(self.request.body) else: self.json_args = None Overriding RequestHandler methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In addition to ``get()``/``post()``/etc, certain other methods in `.RequestHandler` are designed to be overridden by subclasses when necessary. On every request, the following sequence of calls takes place: 1. A new `.RequestHandler` object is created on each request. 2. `~.RequestHandler.initialize()` is called with the initialization arguments from the `.Application` configuration. ``initialize`` should typically just save the arguments passed into member variables; it may not produce any output or call methods like `~.RequestHandler.send_error`. 3. `~.RequestHandler.prepare()` is called. This is most useful in a base class shared by all of your handler subclasses, as ``prepare`` is called no matter which HTTP method is used. ``prepare`` may produce output; if it calls `~.RequestHandler.finish` (or ``redirect``, etc), processing stops here. 4. One of the HTTP methods is called: ``get()``, ``post()``, ``put()``, etc. If the URL regular expression contains capturing groups, they are passed as arguments to this method. 5. When the request is finished, `~.RequestHandler.on_finish()` is called. This is generally after ``get()`` or another HTTP method returns. All methods designed to be overridden are noted as such in the `.RequestHandler` documentation. Some of the most commonly overridden methods include: - `~.RequestHandler.write_error` - outputs HTML for use on error pages. - `~.RequestHandler.on_connection_close` - called when the client disconnects; applications may choose to detect this case and halt further processing. Note that there is no guarantee that a closed connection can be detected promptly. - `~.RequestHandler.get_current_user` - see :ref:`user-authentication`. 
- `~.RequestHandler.get_user_locale` - returns `.Locale` object to use for the current user. - `~.RequestHandler.set_default_headers` - may be used to set additional headers on the response (such as a custom ``Server`` header). Error Handling ~~~~~~~~~~~~~~ If a handler raises an exception, Tornado will call `.RequestHandler.write_error` to generate an error page. `tornado.web.HTTPError` can be used to generate a specified status code; all other exceptions return a 500 status. The default error page includes a stack trace in debug mode and a one-line description of the error (e.g. "500: Internal Server Error") otherwise. To produce a custom error page, override `RequestHandler.write_error` (probably in a base class shared by all your handlers). This method may produce output normally via methods such as `~RequestHandler.write` and `~RequestHandler.render`. If the error was caused by an exception, an ``exc_info`` triple will be passed as a keyword argument (note that this exception is not guaranteed to be the current exception in `sys.exc_info`, so ``write_error`` must use e.g. `traceback.format_exception` instead of `traceback.format_exc`). It is also possible to generate an error page from regular handler methods instead of ``write_error`` by calling `~.RequestHandler.set_status`, writing a response, and returning. The special exception `tornado.web.Finish` may be raised to terminate the handler without calling ``write_error`` in situations where simply returning is not convenient. For 404 errors, use the ``default_handler_class`` `Application setting <.Application.settings>`. This handler should override `~.RequestHandler.prepare` instead of a more specific method like ``get()`` so it works with any HTTP method. It should produce its error page as described above: either by raising a ``HTTPError(404)`` and overriding ``write_error``, or calling ``self.set_status(404)`` and producing the response directly in ``prepare()``. Redirection ~~~~~~~~~~~ There are two main ways you can redirect requests in Tornado: `.RequestHandler.redirect` and with the `.RedirectHandler`. You can use ``self.redirect()`` within a `.RequestHandler` method to redirect users elsewhere. There is also an optional parameter ``permanent`` which you can use to indicate that the redirection is considered permanent. The default value of ``permanent`` is ``False``, which generates a ``302 Found`` HTTP response code and is appropriate for things like redirecting users after successful ``POST`` requests. If ``permanent`` is ``True``, the ``301 Moved Permanently`` HTTP response code is used, which is useful for e.g. redirecting to a canonical URL for a page in an SEO-friendly manner. `.RedirectHandler` lets you configure redirects directly in your `.Application` routing table. For example, to configure a single static redirect:: app = tornado.web.Application([ url(r"/app", tornado.web.RedirectHandler, dict(url="http://itunes.apple.com/my-app-id")), ]) `.RedirectHandler` also supports regular expression substitutions. The following rule redirects all requests beginning with ``/pictures/`` to the prefix ``/photos/`` instead:: app = tornado.web.Application([ url(r"/photos/(.*)", MyPhotoHandler), url(r"/pictures/(.*)", tornado.web.RedirectHandler, dict(url=r"/photos/{0}")), ]) Unlike `.RequestHandler.redirect`, `.RedirectHandler` uses permanent redirects by default. 
This is because the routing table does not change at runtime and is presumed to be permanent, while redirects found in handlers are likely to be the result of other logic that may change. To send a temporary redirect with a `.RedirectHandler`, add ``permanent=False`` to the `.RedirectHandler` initialization arguments. Asynchronous handlers ~~~~~~~~~~~~~~~~~~~~~ Certain handler methods (including ``prepare()`` and the HTTP verb methods ``get()``/``post()``/etc) may be overridden as coroutines to make the handler asynchronous. For example, here is a simple handler using a coroutine: .. testcode:: class MainHandler(tornado.web.RequestHandler): async def get(self): http = tornado.httpclient.AsyncHTTPClient() response = await http.fetch("http://friendfeed-api.com/v2/feed/bret") json = tornado.escape.json_decode(response.body) self.write("Fetched " + str(len(json["entries"])) + " entries " "from the FriendFeed API") .. testoutput:: :hide: For a more advanced asynchronous example, take a look at the `chat example application `_, which implements an AJAX chat room using `long polling `_. Users of long polling may want to override ``on_connection_close()`` to clean up after the client closes the connection (but see that method's docstring for caveats). tornado-6.1.0/docs/guide/templates.rst000066400000000000000000000304151374705040500177510ustar00rootroot00000000000000Templates and UI ================ .. testsetup:: import tornado.web Tornado includes a simple, fast, and flexible templating language. This section describes that language as well as related issues such as internationalization. Tornado can also be used with any other Python template language, although there is no provision for integrating these systems into `.RequestHandler.render`. Simply render the template to a string and pass it to `.RequestHandler.write` Configuring templates ~~~~~~~~~~~~~~~~~~~~~ By default, Tornado looks for template files in the same directory as the ``.py`` files that refer to them. To put your template files in a different directory, use the ``template_path`` `Application setting <.Application.settings>` (or override `.RequestHandler.get_template_path` if you have different template paths for different handlers). To load templates from a non-filesystem location, subclass `tornado.template.BaseLoader` and pass an instance as the ``template_loader`` application setting. Compiled templates are cached by default; to turn off this caching and reload templates so changes to the underlying files are always visible, use the application settings ``compiled_template_cache=False`` or ``debug=True``. Template syntax ~~~~~~~~~~~~~~~ A Tornado template is just HTML (or any other text-based format) with Python control sequences and expressions embedded within the markup:: {{ title }}
    {% for item in items %}
  • {{ escape(item) }}
  • {% end %}
If you saved this template as "template.html" and put it in the same directory as your Python file, you could render this template with: .. testcode:: class MainHandler(tornado.web.RequestHandler): def get(self): items = ["Item 1", "Item 2", "Item 3"] self.render("template.html", title="My title", items=items) .. testoutput:: :hide: Tornado templates support *control statements* and *expressions*. Control statements are surrounded by ``{%`` and ``%}``, e.g. ``{% if len(items) > 2 %}``. Expressions are surrounded by ``{{`` and ``}}``, e.g. ``{{ items[0] }}``. Control statements more or less map exactly to Python statements. We support ``if``, ``for``, ``while``, and ``try``, all of which are terminated with ``{% end %}``. We also support *template inheritance* using the ``extends`` and ``block`` statements, which are described in detail in the documentation for the `tornado.template`. Expressions can be any Python expression, including function calls. Template code is executed in a namespace that includes the following objects and functions. (Note that this list applies to templates rendered using `.RequestHandler.render` and `~.RequestHandler.render_string`. If you're using the `tornado.template` module directly outside of a `.RequestHandler` many of these entries are not present). - ``escape``: alias for `tornado.escape.xhtml_escape` - ``xhtml_escape``: alias for `tornado.escape.xhtml_escape` - ``url_escape``: alias for `tornado.escape.url_escape` - ``json_encode``: alias for `tornado.escape.json_encode` - ``squeeze``: alias for `tornado.escape.squeeze` - ``linkify``: alias for `tornado.escape.linkify` - ``datetime``: the Python `datetime` module - ``handler``: the current `.RequestHandler` object - ``request``: alias for `handler.request <.HTTPServerRequest>` - ``current_user``: alias for `handler.current_user <.RequestHandler.current_user>` - ``locale``: alias for `handler.locale <.Locale>` - ``_``: alias for `handler.locale.translate <.Locale.translate>` - ``static_url``: alias for `handler.static_url <.RequestHandler.static_url>` - ``xsrf_form_html``: alias for `handler.xsrf_form_html <.RequestHandler.xsrf_form_html>` - ``reverse_url``: alias for `.Application.reverse_url` - All entries from the ``ui_methods`` and ``ui_modules`` ``Application`` settings - Any keyword arguments passed to `~.RequestHandler.render` or `~.RequestHandler.render_string` When you are building a real application, you are going to want to use all of the features of Tornado templates, especially template inheritance. Read all about those features in the `tornado.template` section (some features, including ``UIModules`` are implemented in the `tornado.web` module) Under the hood, Tornado templates are translated directly to Python. The expressions you include in your template are copied verbatim into a Python function representing your template. We don't try to prevent anything in the template language; we created it explicitly to provide the flexibility that other, stricter templating systems prevent. Consequently, if you write random stuff inside of your template expressions, you will get random Python errors when you execute the template. All template output is escaped by default, using the `tornado.escape.xhtml_escape` function. This behavior can be changed globally by passing ``autoescape=None`` to the `.Application` or `.tornado.template.Loader` constructors, for a template file with the ``{% autoescape None %}`` directive, or for a single expression by replacing ``{{ ... }}`` with ``{% raw ...%}``. 
Additionally, in each of these places the name of an alternative escaping function may be used instead of ``None``. Note that while Tornado's automatic escaping is helpful in avoiding XSS vulnerabilities, it is not sufficient in all cases. Expressions that appear in certain locations, such as in JavaScript or CSS, may need additional escaping. Additionally, either care must be taken to always use double quotes and `.xhtml_escape` in HTML attributes that may contain untrusted content, or a separate escaping function must be used for attributes (see e.g. `this blog post `_). Internationalization ~~~~~~~~~~~~~~~~~~~~ The locale of the current user (whether they are logged in or not) is always available as ``self.locale`` in the request handler and as ``locale`` in templates. The name of the locale (e.g., ``en_US``) is available as ``locale.name``, and you can translate strings with the `.Locale.translate` method. Templates also have the global function call ``_()`` available for string translation. The translate function has two forms:: _("Translate this string") which translates the string directly based on the current locale, and:: _("A person liked this", "%(num)d people liked this", len(people)) % {"num": len(people)} which translates a string that can be singular or plural based on the value of the third argument. In the example above, a translation of the first string will be returned if ``len(people)`` is ``1``, or a translation of the second string will be returned otherwise. The most common pattern for translations is to use Python named placeholders for variables (the ``%(num)d`` in the example above) since placeholders can move around on translation. Here is a properly internationalized template:: FriendFeed - {{ _("Sign in") }}
{{ _("Username") }}
{{ _("Password") }}
{% module xsrf_form_html() %}
By default, we detect the user's locale using the ``Accept-Language`` header sent by the user's browser. We choose ``en_US`` if we can't find an appropriate ``Accept-Language`` value. If you let user's set their locale as a preference, you can override this default locale selection by overriding `.RequestHandler.get_user_locale`: .. testcode:: class BaseHandler(tornado.web.RequestHandler): def get_current_user(self): user_id = self.get_secure_cookie("user") if not user_id: return None return self.backend.get_user_by_id(user_id) def get_user_locale(self): if "locale" not in self.current_user.prefs: # Use the Accept-Language header return None return self.current_user.prefs["locale"] .. testoutput:: :hide: If ``get_user_locale`` returns ``None``, we fall back on the ``Accept-Language`` header. The `tornado.locale` module supports loading translations in two formats: the ``.mo`` format used by `gettext` and related tools, and a simple ``.csv`` format. An application will generally call either `tornado.locale.load_translations` or `tornado.locale.load_gettext_translations` once at startup; see those methods for more details on the supported formats. You can get the list of supported locales in your application with `tornado.locale.get_supported_locales()`. The user's locale is chosen to be the closest match based on the supported locales. For example, if the user's locale is ``es_GT``, and the ``es`` locale is supported, ``self.locale`` will be ``es`` for that request. We fall back on ``en_US`` if no close match can be found. .. _ui-modules: UI modules ~~~~~~~~~~ Tornado supports *UI modules* to make it easy to support standard, reusable UI widgets across your application. UI modules are like special function calls to render components of your page, and they can come packaged with their own CSS and JavaScript. For example, if you are implementing a blog, and you want to have blog entries appear on both the blog home page and on each blog entry page, you can make an ``Entry`` module to render them on both pages. First, create a Python module for your UI modules, e.g. ``uimodules.py``:: class Entry(tornado.web.UIModule): def render(self, entry, show_comments=False): return self.render_string( "module-entry.html", entry=entry, show_comments=show_comments) Tell Tornado to use ``uimodules.py`` using the ``ui_modules`` setting in your application:: from . import uimodules class HomeHandler(tornado.web.RequestHandler): def get(self): entries = self.db.query("SELECT * FROM entries ORDER BY date DESC") self.render("home.html", entries=entries) class EntryHandler(tornado.web.RequestHandler): def get(self, entry_id): entry = self.db.get("SELECT * FROM entries WHERE id = %s", entry_id) if not entry: raise tornado.web.HTTPError(404) self.render("entry.html", entry=entry) settings = { "ui_modules": uimodules, } application = tornado.web.Application([ (r"/", HomeHandler), (r"/entry/([0-9]+)", EntryHandler), ], **settings) Within a template, you can call a module with the ``{% module %}`` statement. 
For example, you could call the ``Entry`` module from both ``home.html``:: {% for entry in entries %} {% module Entry(entry) %} {% end %} and ``entry.html``:: {% module Entry(entry, show_comments=True) %} Modules can include custom CSS and JavaScript functions by overriding the ``embedded_css``, ``embedded_javascript``, ``javascript_files``, or ``css_files`` methods:: class Entry(tornado.web.UIModule): def embedded_css(self): return ".entry { margin-bottom: 1em; }" def render(self, entry, show_comments=False): return self.render_string( "module-entry.html", show_comments=show_comments) Module CSS and JavaScript will be included once no matter how many times a module is used on a page. CSS is always included in the ```` of the page, and JavaScript is always included just before the ```` tag at the end of the page. When additional Python code is not required, a template file itself may be used as a module. For example, the preceding example could be rewritten to put the following in ``module-entry.html``:: {{ set_resources(embedded_css=".entry { margin-bottom: 1em; }") }} This revised template module would be invoked with:: {% module Template("module-entry.html", show_comments=True) %} The ``set_resources`` function is only available in templates invoked via ``{% module Template(...) %}``. Unlike the ``{% include ... %}`` directive, template modules have a distinct namespace from their containing template - they can only see the global template namespace and their own keyword arguments. tornado-6.1.0/docs/http.rst000066400000000000000000000001741374705040500156340ustar00rootroot00000000000000HTTP servers and clients ======================== .. toctree:: httpserver httpclient httputil http1connection tornado-6.1.0/docs/http1connection.rst000066400000000000000000000003001374705040500177640ustar00rootroot00000000000000``tornado.http1connection`` -- HTTP/1.x client/server implementation ==================================================================== .. automodule:: tornado.http1connection :members: tornado-6.1.0/docs/httpclient.rst000066400000000000000000000031231374705040500170300ustar00rootroot00000000000000``tornado.httpclient`` --- Asynchronous HTTP client =================================================== .. automodule:: tornado.httpclient HTTP client interfaces ---------------------- .. autoclass:: HTTPClient :members: .. autoclass:: AsyncHTTPClient :members: Request objects --------------- .. autoclass:: HTTPRequest :members: Response objects ---------------- .. autoclass:: HTTPResponse :members: Exceptions ---------- .. autoexception:: HTTPClientError :members: .. exception:: HTTPError Alias for `HTTPClientError`. Command-line interface ---------------------- This module provides a simple command-line interface to fetch a url using Tornado's HTTP client. Example usage:: # Fetch the url and print its body python -m tornado.httpclient http://www.google.com # Just print the headers python -m tornado.httpclient --print_headers --print_body=false http://www.google.com Implementations ~~~~~~~~~~~~~~~ .. automodule:: tornado.simple_httpclient .. autoclass:: SimpleAsyncHTTPClient :members: .. module:: tornado.curl_httpclient .. class:: CurlAsyncHTTPClient(max_clients=10, defaults=None) ``libcurl``-based HTTP client. Example Code ~~~~~~~~~~~~ * `A simple webspider `_ shows how to fetch URLs concurrently. * `The file uploader demo `_ uses either HTTP POST or HTTP PUT to upload files to a server. 
tornado-6.1.0/docs/httpserver.rst000066400000000000000000000016161374705040500170650ustar00rootroot00000000000000``tornado.httpserver`` --- Non-blocking HTTP server =================================================== .. automodule:: tornado.httpserver HTTP Server ----------- .. autoclass:: HTTPServer(request_callback: Union[httputil.HTTPServerConnectionDelegate, Callable[[httputil.HTTPServerRequest], None]], no_keep_alive: bool = False, xheaders: bool = False, ssl_options: Union[Dict[str, Any], ssl.SSLContext] = None, protocol: Optional[str] = None, decompress_request: bool = False, chunk_size: Optional[int] = None, max_header_size: Optional[int] = None, idle_connection_timeout: Optional[float] = None, body_timeout: Optional[float] = None, max_body_size: Optional[int] = None, max_buffer_size: Optional[int] = None, trusted_downstream: Optional[List[str]] = None) :members: The public interface of this class is mostly inherited from `.TCPServer` and is documented under that class. tornado-6.1.0/docs/httputil.rst000066400000000000000000000003261374705040500165310ustar00rootroot00000000000000``tornado.httputil`` --- Manipulate HTTP headers and URLs ========================================================= .. testsetup:: from tornado.httputil import * .. automodule:: tornado.httputil :members: tornado-6.1.0/docs/index.rst000066400000000000000000000133121374705040500157620ustar00rootroot00000000000000.. title:: Tornado Web Server .. meta:: :google-site-verification: g4bVhgwbVO1d9apCUsT-eKlApg31Cygbp8VGZY8Rf0g |Tornado Web Server| ==================== .. |Tornado Web Server| image:: tornado.png :alt: Tornado Web Server `Tornado `_ is a Python web framework and asynchronous networking library, originally developed at `FriendFeed `_. By using non-blocking network I/O, Tornado can scale to tens of thousands of open connections, making it ideal for `long polling `_, `WebSockets `_, and other applications that require a long-lived connection to each user. Quick links ----------- * Current version: |version| (`download from PyPI `_, :doc:`release notes `) * `Source (GitHub) `_ * Mailing lists: `discussion `_ and `announcements `_ * `Stack Overflow `_ * `Wiki `_ Hello, world ------------ Here is a simple "Hello, world" example web app for Tornado:: import tornado.ioloop import tornado.web class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") def make_app(): return tornado.web.Application([ (r"/", MainHandler), ]) if __name__ == "__main__": app = make_app() app.listen(8888) tornado.ioloop.IOLoop.current().start() This example does not use any of Tornado's asynchronous features; for that see this `simple chat room `_. Threads and WSGI ---------------- Tornado is different from most Python web frameworks. It is not based on `WSGI `_, and it is typically run with only one thread per process. See the :doc:`guide` for more on Tornado's approach to asynchronous programming. While some support of WSGI is available in the `tornado.wsgi` module, it is not a focus of development and most applications should be written to use Tornado's own interfaces (such as `tornado.web`) directly instead of using WSGI. In general, Tornado code is not thread-safe. The only method in Tornado that is safe to call from other threads is `.IOLoop.add_callback`. You can also use `.IOLoop.run_in_executor` to asynchronously run a blocking function on another thread, but note that the function passed to ``run_in_executor`` should avoid referencing any Tornado objects. 
``run_in_executor`` is the recommended way to interact with blocking code. ``asyncio`` Integration ----------------------- Tornado is integrated with the standard library `asyncio` module and shares the same event loop (by default since Tornado 5.0). In general, libraries designed for use with `asyncio` can be mixed freely with Tornado. Installation ------------ :: pip install tornado Tornado is listed in `PyPI `_ and can be installed with ``pip``. Note that the source distribution includes demo applications that are not present when Tornado is installed in this way, so you may wish to download a copy of the source tarball or clone the `git repository `_ as well. **Prerequisites**: Tornado 6.0 requires Python 3.5.2 or newer (See `Tornado 5.1 `_ if compatibility with Python 2.7 is required). The following optional packages may be useful: * `pycurl `_ is used by the optional ``tornado.curl_httpclient``. Libcurl version 7.22 or higher is required. * `Twisted `_ may be used with the classes in `tornado.platform.twisted`. * `pycares `_ is an alternative non-blocking DNS resolver that can be used when threads are not appropriate. **Platforms**: Tornado is designed for Unix-like platforms, with best performance and scalability on systems supporting ``epoll`` (Linux), ``kqueue`` (BSD/macOS), or ``/dev/poll`` (Solaris). Tornado will also run on Windows, although this configuration is not officially supported or recommended for production use. Some features are missing on Windows (including multi-process mode) and scalability is limited (Even though Tornado is built on ``asyncio``, which supports Windows, Tornado does not use the APIs that are necessary for scalable networking on Windows). Documentation ------------- This documentation is also available in `PDF and Epub formats `_. .. toctree:: :titlesonly: guide webframework http networking coroutine integration utilities faq releases * :ref:`genindex` * :ref:`modindex` * :ref:`search` Discussion and support ---------------------- You can discuss Tornado on `the Tornado developer mailing list `_, and report bugs on the `GitHub issue tracker `_. Links to additional resources can be found on the `Tornado wiki `_. New releases are announced on the `announcements mailing list `_. Tornado is available under the `Apache License, Version 2.0 `_. This web site and all documentation is licensed under `Creative Commons 3.0 `_. tornado-6.1.0/docs/integration.rst000066400000000000000000000002061374705040500171740ustar00rootroot00000000000000Integration with other services =============================== .. toctree:: auth wsgi caresresolver twisted asyncio tornado-6.1.0/docs/ioloop.rst000066400000000000000000000023571374705040500161630ustar00rootroot00000000000000``tornado.ioloop`` --- Main event loop ====================================== .. automodule:: tornado.ioloop IOLoop objects -------------- .. autoclass:: IOLoop Running an IOLoop ^^^^^^^^^^^^^^^^^ .. automethod:: IOLoop.current .. automethod:: IOLoop.make_current .. automethod:: IOLoop.clear_current .. automethod:: IOLoop.start .. automethod:: IOLoop.stop .. automethod:: IOLoop.run_sync .. automethod:: IOLoop.close .. automethod:: IOLoop.instance .. automethod:: IOLoop.install .. automethod:: IOLoop.clear_instance I/O events ^^^^^^^^^^ .. automethod:: IOLoop.add_handler .. automethod:: IOLoop.update_handler .. automethod:: IOLoop.remove_handler Callbacks and timeouts ^^^^^^^^^^^^^^^^^^^^^^ .. automethod:: IOLoop.add_callback .. automethod:: IOLoop.add_callback_from_signal .. 
automethod:: IOLoop.add_future .. automethod:: IOLoop.add_timeout .. automethod:: IOLoop.call_at .. automethod:: IOLoop.call_later .. automethod:: IOLoop.remove_timeout .. automethod:: IOLoop.spawn_callback .. automethod:: IOLoop.run_in_executor .. automethod:: IOLoop.set_default_executor .. automethod:: IOLoop.time .. autoclass:: PeriodicCallback :members: tornado-6.1.0/docs/iostream.rst000066400000000000000000000025621374705040500165030ustar00rootroot00000000000000``tornado.iostream`` --- Convenient wrappers for non-blocking sockets ===================================================================== .. automodule:: tornado.iostream Base class ---------- .. autoclass:: BaseIOStream Main interface ^^^^^^^^^^^^^^ .. automethod:: BaseIOStream.write .. automethod:: BaseIOStream.read_bytes .. automethod:: BaseIOStream.read_into .. automethod:: BaseIOStream.read_until .. automethod:: BaseIOStream.read_until_regex .. automethod:: BaseIOStream.read_until_close .. automethod:: BaseIOStream.close .. automethod:: BaseIOStream.set_close_callback .. automethod:: BaseIOStream.closed .. automethod:: BaseIOStream.reading .. automethod:: BaseIOStream.writing .. automethod:: BaseIOStream.set_nodelay Methods for subclasses ^^^^^^^^^^^^^^^^^^^^^^ .. automethod:: BaseIOStream.fileno .. automethod:: BaseIOStream.close_fd .. automethod:: BaseIOStream.write_to_fd .. automethod:: BaseIOStream.read_from_fd .. automethod:: BaseIOStream.get_fd_error Implementations --------------- .. autoclass:: IOStream :members: .. autoclass:: SSLIOStream :members: .. autoclass:: PipeIOStream :members: Exceptions ---------- .. autoexception:: StreamBufferFullError .. autoexception:: StreamClosedError .. autoexception:: UnsatisfiableReadError tornado-6.1.0/docs/locale.rst000066400000000000000000000002251374705040500161110ustar00rootroot00000000000000``tornado.locale`` --- Internationalization support =================================================== .. automodule:: tornado.locale :members: tornado-6.1.0/docs/locks.rst000066400000000000000000000021151374705040500157650ustar00rootroot00000000000000``tornado.locks`` -- Synchronization primitives =============================================== .. versionadded:: 4.2 Coordinate coroutines with synchronization primitives analogous to those the standard library provides to threads. These classes are very similar to those provided in the standard library's `asyncio package `_. .. warning:: Note that these primitives are not actually thread-safe and cannot be used in place of those from the standard library's `threading` module--they are meant to coordinate Tornado coroutines in a single-threaded app, not to protect shared objects in a multithreaded app. .. automodule:: tornado.locks Condition --------- .. autoclass:: Condition :members: Event ----- .. autoclass:: Event :members: Semaphore --------- .. autoclass:: Semaphore :members: BoundedSemaphore ---------------- .. autoclass:: BoundedSemaphore :members: :inherited-members: Lock ---- .. autoclass:: Lock :members: :inherited-members: tornado-6.1.0/docs/log.rst000066400000000000000000000001621374705040500154330ustar00rootroot00000000000000``tornado.log`` --- Logging support =================================== .. automodule:: tornado.log :members: tornado-6.1.0/docs/netutil.rst000066400000000000000000000002361374705040500163400ustar00rootroot00000000000000``tornado.netutil`` --- Miscellaneous network utilities ======================================================= .. 
automodule:: tornado.netutil :members: tornado-6.1.0/docs/networking.rst000066400000000000000000000001721374705040500170420ustar00rootroot00000000000000Asynchronous networking ======================= .. toctree:: ioloop iostream netutil tcpclient tcpserver tornado-6.1.0/docs/options.rst000066400000000000000000000020141374705040500163430ustar00rootroot00000000000000``tornado.options`` --- Command-line parsing ============================================ .. automodule:: tornado.options Global functions ---------------- .. autofunction:: define .. py:data:: options Global options object. All defined options are available as attributes on this object. .. autofunction:: parse_command_line .. autofunction:: parse_config_file .. autofunction:: print_help(file=sys.stderr) .. autofunction:: add_parse_callback .. autoexception:: Error OptionParser class ------------------ .. autoclass:: OptionParser .. automethod:: OptionParser.define .. automethod:: OptionParser.parse_command_line .. automethod:: OptionParser.parse_config_file .. automethod:: OptionParser.print_help .. automethod:: OptionParser.add_parse_callback .. automethod:: OptionParser.mockable .. automethod:: OptionParser.items .. automethod:: OptionParser.as_dict .. automethod:: OptionParser.groups .. automethod:: OptionParser.group_dict tornado-6.1.0/docs/process.rst000066400000000000000000000003731374705040500163340ustar00rootroot00000000000000``tornado.process`` --- Utilities for multiple processes ======================================================== .. automodule:: tornado.process :members: .. exception:: CalledProcessError An alias for `subprocess.CalledProcessError`. tornado-6.1.0/docs/queues.rst000066400000000000000000000010131374705040500161550ustar00rootroot00000000000000``tornado.queues`` -- Queues for coroutines =========================================== .. versionadded:: 4.2 .. automodule:: tornado.queues Classes ------- Queue ^^^^^ .. autoclass:: Queue :members: PriorityQueue ^^^^^^^^^^^^^ .. autoclass:: PriorityQueue :members: LifoQueue ^^^^^^^^^ .. autoclass:: LifoQueue :members: Exceptions ---------- QueueEmpty ^^^^^^^^^^ .. autoexception:: QueueEmpty QueueFull ^^^^^^^^^ .. autoexception:: QueueFull tornado-6.1.0/docs/releases.rst000066400000000000000000000017131374705040500164600ustar00rootroot00000000000000Release notes ============= .. 
toctree:: :maxdepth: 2 releases/v6.1.0 releases/v6.0.4 releases/v6.0.3 releases/v6.0.2 releases/v6.0.1 releases/v6.0.0 releases/v5.1.1 releases/v5.1.0 releases/v5.0.2 releases/v5.0.1 releases/v5.0.0 releases/v4.5.3 releases/v4.5.2 releases/v4.5.1 releases/v4.5.0 releases/v4.4.3 releases/v4.4.2 releases/v4.4.1 releases/v4.4.0 releases/v4.3.0 releases/v4.2.1 releases/v4.2.0 releases/v4.1.0 releases/v4.0.2 releases/v4.0.1 releases/v4.0.0 releases/v3.2.2 releases/v3.2.1 releases/v3.2.0 releases/v3.1.1 releases/v3.1.0 releases/v3.0.2 releases/v3.0.1 releases/v3.0.0 releases/v2.4.1 releases/v2.4.0 releases/v2.3.0 releases/v2.2.1 releases/v2.2.0 releases/v2.1.1 releases/v2.1.0 releases/v2.0.0 releases/v1.2.1 releases/v1.2.0 releases/v1.1.1 releases/v1.1.0 releases/v1.0.1 releases/v1.0.0 tornado-6.1.0/docs/releases/000077500000000000000000000000001374705040500157245ustar00rootroot00000000000000tornado-6.1.0/docs/releases/v1.0.0.rst000066400000000000000000000051531374705040500173040ustar00rootroot00000000000000What's new in Tornado 1.0 ========================= July 22, 2010 ------------- :: We are pleased to announce the release of Tornado 1.0, available from https://github.com/downloads/facebook/tornado/tornado-1.0.tar.gz. There have been many changes since version 0.2; here are some of the highlights: New features: * Improved support for running other WSGI applications in a Tornado server (tested with Django and CherryPy) * Improved performance on Mac OS X and BSD (kqueue-based IOLoop), and experimental support for win32 * Rewritten AsyncHTTPClient available as tornado.httpclient.AsyncHTTPClient2 (this will become the default in a future release) * Support for standard .mo files in addition to .csv in the locale module * Pre-forking support for running multiple Tornado processes at once (see HTTPServer.start()) * SSL and gzip support in HTTPServer * reverse_url() function refers to urls from the Application config by name from templates and RequestHandlers * RequestHandler.on_connection_close() callback is called when the client has closed the connection (subject to limitations of the underlying network stack, any proxies, etc) * Static files can now be served somewhere other than /static/ via the static_url_prefix application setting * URL regexes can now use named groups ("(?P<name>)") to pass arguments to get()/post() via keyword instead of position * HTTP header dictionary-like objects now support multiple values for the same header via the get_all() and add() methods. * Several new options in the httpclient module, including prepare_curl_callback and header_callback * Improved logging configuration in tornado.options. * UIModule.html_body() can be used to return html to be inserted at the end of the document body. Backwards-incompatible changes: * RequestHandler.get_error_html() now receives the exception object as a keyword argument if the error was caused by an uncaught exception. * Secure cookies are now more secure, but incompatible with cookies set by Tornado 0.2. To read cookies set by older versions of Tornado, pass include_name=False to RequestHandler.get_secure_cookie() * Parameters passed to RequestHandler.get/post() by extraction from the path now have %-escapes decoded, for consistency with the processing that was already done with other query parameters. Many thanks to everyone who contributed patches, bug reports, and feedback that went into this release!
-Ben tornado-6.1.0/docs/releases/v1.0.1.rst000066400000000000000000000004061374705040500173010ustar00rootroot00000000000000What's new in Tornado 1.0.1 =========================== Aug 13, 2010 ------------ :: This release fixes a bug with RequestHandler.get_secure_cookie, which would in some circumstances allow an attacker to tamper with data stored in the cookie. tornado-6.1.0/docs/releases/v1.1.0.rst000066400000000000000000000052051374705040500173030ustar00rootroot00000000000000What's new in Tornado 1.1 ========================= Sep 7, 2010 ----------- :: We are pleased to announce the release of Tornado 1.1, available from https://github.com/downloads/facebook/tornado/tornado-1.1.tar.gz Changes in this release: * RequestHandler.async_callback and related functions in other classes are no longer needed in most cases (although it's harmless to continue using them). Uncaught exceptions will now cause the request to be closed even in a callback. If you're curious how this works, see the new tornado.stack_context module. * The new tornado.testing module contains support for unit testing asynchronous IOLoop-based code. * AsyncHTTPClient has been rewritten (the new implementation was available as AsyncHTTPClient2 in Tornado 1.0; both names are supported for backwards compatibility). * The tornado.auth module has had a number of updates, including support for OAuth 2.0 and the Facebook Graph API, and upgrading Twitter and Google support to OAuth 1.0a. * The websocket module is back and supports the latest version (76) of the websocket protocol. Note that this module's interface is different from the websocket module that appeared in pre-1.0 versions of Tornado. * New method RequestHandler.initialize() can be overridden in subclasses to simplify handling arguments from URLSpecs. The sequence of methods called during initialization is documented at http://tornadoweb.org/documentation#overriding-requesthandler-methods * get_argument() and related methods now work on PUT requests in addition to POST. * The httpclient module now supports HTTP proxies. * When HTTPServer is run in SSL mode, the SSL handshake is now non-blocking. * Many smaller bug fixes and documentation updates Backwards-compatibility notes: * While most users of Tornado should not have to deal with the stack_context module directly, users of worker thread pools and similar constructs may need to use stack_context.wrap and/or NullContext to avoid memory leaks. * The new AsyncHTTPClient still works with libcurl version 7.16.x, but it performs better when both libcurl and pycurl are at least version 7.18.2. * OAuth transactions started under previous versions of the auth module cannot be completed under the new module. This applies only to the initial authorization process; once an authorized token is issued that token works with either version. Many thanks to everyone who contributed patches, bug reports, and feedback that went into this release! -Ben tornado-6.1.0/docs/releases/v1.1.1.rst000066400000000000000000000015511374705040500173040ustar00rootroot00000000000000What's new in Tornado 1.1.1 =========================== Feb 8, 2011 ----------- :: Tornado 1.1.1 is a BACKWARDS-INCOMPATIBLE security update that fixes an XSRF vulnerability. It is available at https://github.com/downloads/facebook/tornado/tornado-1.1.1.tar.gz This is a backwards-incompatible change. Applications that previously relied on a blanket exception for XMLHTTPRequest may need to be modified to explicitly include the XSRF token when making ajax requests. 
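As an illustration (not from the release itself), here is a minimal sketch of supplying the token from a Python client; the function name is hypothetical, and it assumes the _xsrf cookie value has already been obtained from the server:

    import tornado.httpclient
    import urllib  # Python 2 API, matching this release's vintage

    def post_with_xsrf(url, xsrf_token, data):
        # Tornado compares the _xsrf form argument against the _xsrf
        # cookie, so the request must carry both.
        body = urllib.urlencode(dict(data, _xsrf=xsrf_token))
        client = tornado.httpclient.HTTPClient()
        return client.fetch(url, method="POST", body=body,
                            headers={"Cookie": "_xsrf=" + xsrf_token})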
The tornado chat demo application demonstrates one way of adding this token (specifically the function postJSON in demos/chat/static/chat.js). More information about this change and its justification can be found at http://www.djangoproject.com/weblog/2011/feb/08/security/ http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails tornado-6.1.0/docs/releases/v1.2.0.rst000066400000000000000000000131571374705040500173110ustar00rootroot00000000000000What's new in Tornado 1.2 ========================= Feb 20, 2011 ------------ :: We are pleased to announce the release of Tornado 1.2, available from https://github.com/downloads/facebook/tornado/tornado-1.2.tar.gz Backwards compatibility notes: * This release includes the backwards-incompatible security change from version 1.1.1. Users upgrading from 1.1 or earlier should read the release notes from that release: http://groups.google.com/group/python-tornado/browse_thread/thread/b36191c781580cde * StackContexts that do something other than catch exceptions may need to be modified to be reentrant. https://github.com/tornadoweb/tornado/commit/7a7e24143e77481d140fb5579bc67e4c45cbcfad * When XSRF tokens are used, the token must also be present on PUT and DELETE requests (anything but GET and HEAD) New features: * A new HTTP client implementation is available in the module tornado.simple_httpclient. This HTTP client does not depend on pycurl. It has not yet been tested extensively in production, but is intended to eventually replace the pycurl-based HTTP client in a future release of Tornado. To transparently replace tornado.httpclient.AsyncHTTPClient with this new implementation, you can set the environment variable USE_SIMPLE_HTTPCLIENT=1 (note that the next release of Tornado will likely include a different way to select HTTP client implementations) * Request logging is now done by the Application rather than the RequestHandler. Logging behavior may be customized by either overriding Application.log_request in a subclass or by passing log_function as an Application setting * Application.listen(port): Convenience method as an alternative to explicitly creating an HTTPServer * tornado.escape.linkify(): Wrap urls in <a> tags * RequestHandler.create_signed_value(): Create signatures like the secure_cookie methods without setting cookies. * tornado.testing.get_unused_port(): Returns a port selected in the same way as in AsyncHTTPTestCase * AsyncHTTPTestCase.fetch(): Convenience method for synchronous fetches * IOLoop.set_blocking_signal_threshold(): Set a callback to be run when the IOLoop is blocked. * IOStream.connect(): Asynchronously connect a client socket * AsyncHTTPClient.handle_callback_exception(): May be overridden in subclass for custom error handling * httpclient.HTTPRequest has two new keyword arguments, validate_cert and ca_certs. Setting validate_cert=False will disable all certificate checks when fetching https urls. ca_certs may be set to a filename containing trusted certificate authorities (defaults will be used if this is unspecified) * HTTPRequest.get_ssl_certificate(): Returns the client's SSL certificate (if client certificates were requested in the server's ssl_options) * StaticFileHandler can be configured to return a default file (e.g.
index.html) when a directory is requested * Template directives of the form "{% from x import y %}" are now supported (in addition to the existing support for "{% import x %}") * FacebookGraphMixin.get_authenticated_user now accepts a new parameter 'extra_fields' which may be used to request additional information about the user Bug fixes: * auth: Fixed KeyError with Facebook offline_access * auth: Uses request.uri instead of request.path as the default redirect so that parameters are preserved. * escape: xhtml_escape() now returns a unicode string, not utf8-encoded bytes * ioloop: Callbacks added with add_callback are now run in the order they were added * ioloop: PeriodicCallback.stop can now be called from inside the callback. * iostream: Fixed several bugs in SSLIOStream * iostream: Detect when the other side has closed the connection even with the select()-based IOLoop * iostream: read_bytes(0) now works as expected * iostream: Fixed bug when writing large amounts of data on windows * iostream: Fixed infinite loop that could occur with unhandled exceptions * httpclient: Fix bugs when some requests use proxies and others don't * httpserver: HTTPRequest.protocol is now set correctly when using the built-in SSL support * httpserver: When using multiple processes, the standard library's random number generator is re-seeded in each child process * httpserver: With xheaders enabled, X-Forwarded-Proto is supported as an alternative to X-Scheme * httpserver: Fixed bugs in multipart/form-data parsing * locale: format_date() now behaves sanely with dates in the future * locale: Updates to the language list * stack_context: Fixed bug with contexts leaking through reused IOStreams * stack_context: Simplified semantics and improved performance * web: The order of css_files from UIModules is now preserved * web: Fixed error with default_host redirect * web: StaticFileHandler works when os.path.sep != '/' (i.e. on Windows) * web: Fixed a caching-related bug in StaticFileHandler when a file's timestamp has changed but its contents have not. * web: Fixed bugs with HEAD requests and e.g. Etag headers * web: Fix bugs when different handlers have different static_paths * web: @removeslash will no longer cause a redirect loop when applied to the root path * websocket: Now works over SSL * websocket: Improved compatibility with proxies Many thanks to everyone who contributed patches, bug reports, and feedback that went into this release! -Ben tornado-6.1.0/docs/releases/v1.2.1.rst000066400000000000000000000013611374705040500173040ustar00rootroot00000000000000What's new in Tornado 1.2.1 =========================== Mar 3, 2011 ----------- :: We are pleased to announce the release of Tornado 1.2.1, available from https://github.com/downloads/facebook/tornado/tornado-1.2.1.tar.gz This release contains only two small changes relative to version 1.2: * FacebookGraphMixin has been updated to work with a recent change to the Facebook API. * Running "setup.py install" will no longer attempt to automatically install pycurl. This wasn't working well on platforms where the best way to install pycurl is via something like apt-get instead of easy_install. This is an important upgrade if you are using FacebookGraphMixin, but otherwise it can be safely ignored.
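For readers working through the 1.2 series notes above, a minimal sketch of the Application.listen convenience method introduced in 1.2 (the handler is a hypothetical hello-world, not from the release notes):

    import tornado.ioloop
    import tornado.web

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello, world")

    application = tornado.web.Application([(r"/", MainHandler)])
    # listen() replaces explicitly constructing an HTTPServer.
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()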
tornado-6.1.0/docs/releases/v2.0.0.rst000066400000000000000000000052161374705040500173050ustar00rootroot00000000000000What's new in Tornado 2.0 ========================= Jun 21, 2011 ------------ :: Major changes: * Template output is automatically escaped by default; see backwards compatibility note below. * The default AsyncHTTPClient implementation is now simple_httpclient. * Python 3.2 is now supported. Backwards compatibility: * Template autoescaping is enabled by default. Applications upgrading from a previous release of Tornado must either disable autoescaping or adapt their templates to work with it. For most applications, the simplest way to do this is to pass autoescape=None to the Application constructor. Note that this affects certain built-in methods, e.g. xsrf_form_html and linkify, which must now be called with {% raw %} instead of {{ }} * Applications that wish to continue using curl_httpclient instead of simple_httpclient may do so by calling AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") at the beginning of the process. Users of Python 2.5 will probably want to use curl_httpclient as simple_httpclient only supports ssl on Python 2.6+. * Python 3 compatibility involved many changes throughout the codebase, so users are encouraged to test their applications more thoroughly than usual when upgrading to this release. Other changes in this release: * Templates support several new directives: - {% autoescape ...%} to control escaping behavior - {% raw ... %} for unescaped output - {% module ... %} for calling UIModules * {% module Template(path, **kwargs) %} may now be used to call another template with an independent namespace * All IOStream callbacks are now run directly on the IOLoop via add_callback. * HTTPServer now supports IPv6 where available. To disable, pass family=socket.AF_INET to HTTPServer.bind(). * HTTPClient now supports IPv6, configurable via allow_ipv6=bool on the HTTPRequest. allow_ipv6 defaults to false on simple_httpclient and true on curl_httpclient. * RequestHandlers can use an encoding other than utf-8 for query parameters by overriding decode_argument() * Performance improvements, especially for applications that use a lot of IOLoop timeouts * HTTP OPTIONS method no longer requires an XSRF token. * JSON output (RequestHandler.write(dict)) now sets Content-Type to application/json * Etag computation can now be customized or disabled by overriding RequestHandler.compute_etag * USE_SIMPLE_HTTPCLIENT environment variable is no longer supported. Use AsyncHTTPClient.configure instead. tornado-6.1.0/docs/releases/v2.1.0.rst000066400000000000000000000166301374705040500173100ustar00rootroot00000000000000What's new in Tornado 2.1 ========================= Sep 20, 2011 ------------ Backwards-incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Support for secure cookies written by pre-1.0 releases of Tornado has been removed. The `.RequestHandler.get_secure_cookie` method no longer takes an ``include_name`` parameter. * The ``debug`` application setting now causes stack traces to be displayed in the browser on uncaught exceptions. Since this may leak sensitive information, debug mode is not recommended for public-facing servers. Security fixes ~~~~~~~~~~~~~~ * Diginotar has been removed from the default CA certificates file used by ``SimpleAsyncHTTPClient``. New modules ~~~~~~~~~~~ * `tornado.gen`: A generator-based interface to simplify writing asynchronous functions (a short sketch follows).
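A minimal sketch of the style this module enables (the handler and URL are hypothetical, not from the release notes)::

    from tornado import gen, httpclient, web

    class MainHandler(web.RequestHandler):
        @web.asynchronous
        @gen.engine
        def get(self):
            client = httpclient.AsyncHTTPClient()
            # gen.Task adapts a callback-style call so it can be yielded.
            response = yield gen.Task(client.fetch, "http://example.com/")
            self.write(response.body)
            self.finish()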
* `tornado.netutil`: Parts of `tornado.httpserver` have been extracted into a new module for use with non-HTTP protocols. * `tornado.platform.twisted`: A bridge between the Tornado IOLoop and the Twisted Reactor, allowing code written for Twisted to be run on Tornado. * `tornado.process`: Multi-process mode has been improved, and can now restart crashed child processes. A new entry point has been added at `tornado.process.fork_processes`, although ``tornado.httpserver.HTTPServer.start`` is still supported. ``tornado.web`` ~~~~~~~~~~~~~~~ * `tornado.web.RequestHandler.write_error` replaces ``get_error_html`` as the preferred way to generate custom error pages (``get_error_html`` is still supported, but deprecated) * In `tornado.web.Application`, handlers may be specified by (fully-qualified) name instead of importing and passing the class object itself. * It is now possible to use a custom subclass of ``StaticFileHandler`` with the ``static_handler_class`` application setting, and this subclass can override the behavior of the ``static_url`` method. * `~tornado.web.StaticFileHandler` subclasses can now override ``get_cache_time`` to customize cache control behavior. * `tornado.web.RequestHandler.get_secure_cookie` now has a ``max_age_days`` parameter to allow applications to override the default one-month expiration. * `~tornado.web.RequestHandler.set_cookie` now accepts a ``max_age`` keyword argument to set the ``max-age`` cookie attribute (note underscore vs dash) * `tornado.web.RequestHandler.set_default_headers` may be overridden to set headers in a way that does not get reset during error handling. * `.RequestHandler.add_header` can now be used to set a header that can appear multiple times in the response. * `.RequestHandler.flush` can now take a callback for flow control. * The ``application/json`` content type can now be gzipped. * The cookie-signing functions are now accessible as static functions ``tornado.web.create_signed_value`` and ``tornado.web.decode_signed_value``. ``tornado.httpserver`` ~~~~~~~~~~~~~~~~~~~~~~ * To facilitate some advanced multi-process scenarios, ``HTTPServer`` has a new method ``add_sockets``, and socket-opening code is available separately as `tornado.netutil.bind_sockets`. * The ``cookies`` property is now available on ``tornado.httpserver.HTTPRequest`` (it is also available in its old location as a property of `~tornado.web.RequestHandler`) * ``tornado.httpserver.HTTPServer.bind`` now takes a backlog argument with the same meaning as ``socket.listen``. * `~tornado.httpserver.HTTPServer` can now be run on a unix socket as well as TCP. * Fixed exception at startup when ``socket.AI_ADDRCONFIG`` is not available, as on Windows XP ``IOLoop`` and ``IOStream`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `~tornado.iostream.IOStream` performance has been improved, especially for small synchronous requests. * New methods ``tornado.iostream.IOStream.read_until_close`` and ``tornado.iostream.IOStream.read_until_regex``. * ``IOStream.read_bytes`` and ``IOStream.read_until_close`` now take a ``streaming_callback`` argument to return data as it is received rather than all at once. * `.IOLoop.add_timeout` now accepts `datetime.timedelta` objects in addition to absolute timestamps. * `~tornado.ioloop.PeriodicCallback` now sticks to the specified period instead of creeping later due to accumulated errors. * `tornado.ioloop.IOLoop` and `tornado.httpclient.HTTPClient` now have ``close()`` methods that should be used in applications that create and destroy many of these objects. 
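To illustrate the `datetime.timedelta` form of `.IOLoop.add_timeout` noted above, a minimal sketch (the callback name is hypothetical)::

    import datetime
    from tornado.ioloop import IOLoop

    def on_timeout():
        print("timed out")

    io_loop = IOLoop.instance()
    # Relative deadlines may now be expressed directly as timedeltas.
    io_loop.add_timeout(datetime.timedelta(seconds=5), on_timeout)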
* `.IOLoop.install` can now be used to use a custom subclass of IOLoop as the singleton without monkey-patching. * `~tornado.iostream.IOStream` should now always call the close callback instead of the connect callback on a connection error. * The `.IOStream` close callback will no longer be called while there are pending read callbacks that can be satisfied with buffered data. ``tornado.simple_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Now supports client SSL certificates with the ``client_key`` and ``client_cert`` parameters to `tornado.httpclient.HTTPRequest` * Now takes a maximum buffer size, to allow reading files larger than 100MB * Now works with HTTP 1.0 servers that don't send a Content-Length header * The ``allow_nonstandard_methods`` flag on HTTP client requests now permits methods other than ``POST`` and ``PUT`` to contain bodies. * Fixed file descriptor leaks and multiple callback invocations in ``SimpleAsyncHTTPClient`` * No longer consumes extra connection resources when following redirects. * Now works with buggy web servers that separate headers with ``\n`` instead of ``\r\n\r\n``. * Now sets ``response.request_time`` correctly. * Connect timeouts now work correctly. Other modules ~~~~~~~~~~~~~ * `tornado.auth.OpenIdMixin` now uses the correct realm when the callback URI is on a different domain. * `tornado.autoreload` has a new command-line interface which can be used to wrap any script. This replaces the ``--autoreload`` argument to `tornado.testing.main` and is more robust against syntax errors. * `tornado.autoreload.watch` can be used to watch files other than the sources of imported modules. * ``tornado.database.Connection`` has new variants of ``execute`` and ``executemany`` that return the number of rows affected instead of the last inserted row id. * `tornado.locale.load_translations` now accepts any properly-formatted locale name, not just those in the predefined ``LOCALE_NAMES`` list. * `tornado.options.define` now takes a ``group`` parameter to group options in ``--help`` output. * Template loaders now take a ``namespace`` constructor argument to add entries to the template namespace. * `tornado.websocket` now supports the latest ("hybi-10") version of the protocol (the old version, "hixie-76" is still supported; the correct version is detected automatically). * `tornado.websocket` now works on Python 3 Bug fixes ~~~~~~~~~ * Windows support has been improved. Windows is still not an officially supported platform, but the test suite now passes and `tornado.autoreload` works. * Uploading files whose names contain special characters will now work. * Cookie values containing special characters are now properly quoted and unquoted. * Multi-line headers are now supported. * Repeated Content-Length headers (which may be added by certain proxies) are now supported in `.HTTPServer`. * Unicode string literals now work in template expressions. * The template ``{% module %}`` directive now works even if applications use a template variable named ``modules``. * Requests with "Expect: 100-continue" now work on python 3 tornado-6.1.0/docs/releases/v2.1.1.rst000066400000000000000000000021361374705040500173050ustar00rootroot00000000000000What's new in Tornado 2.1.1 =========================== Oct 4, 2011 ----------- Bug fixes ~~~~~~~~~ * Fixed handling of closed connections with the ``epoll`` (i.e. Linux) ``IOLoop``. Previously, closed connections could be shut down too early, which most often manifested as "Stream is closed" exceptions in ``SimpleAsyncHTTPClient``. 
* Fixed a case in which chunked responses could be closed prematurely, leading to truncated output. * ``IOStream.connect`` now reports errors more consistently via logging and the close callback (this affects e.g. connections to localhost on FreeBSD). * ``IOStream.read_bytes`` again accepts both ``int`` and ``long`` arguments. * ``PeriodicCallback`` no longer runs repeatedly when ``IOLoop`` iterations complete faster than the resolution of ``time.time()`` (mainly a problem on Windows). Backwards-compatibility note ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Listening for ``IOLoop.ERROR`` alone is no longer sufficient for detecting closed connections on an otherwise unused socket. ``IOLoop.ERROR`` must always be used in combination with ``READ`` or ``WRITE``. tornado-6.1.0/docs/releases/v2.2.0.rst000066400000000000000000000127171374705040500173130ustar00rootroot00000000000000What's new in Tornado 2.2 ========================= Jan 30, 2012 ------------ Highlights ~~~~~~~~~~ * Updated and expanded WebSocket support. * Improved compatibility in the Twisted/Tornado bridge. * Template errors now generate better stack traces. * Better exception handling in `tornado.gen`. Security fixes ~~~~~~~~~~~~~~ * ``tornado.simple_httpclient`` now disables SSLv2 in all cases. Previously SSLv2 would be allowed if the Python interpreter was linked against a pre-1.0 version of OpenSSL. Backwards-incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `tornado.process.fork_processes` now raises `SystemExit` if all child processes exit cleanly rather than returning ``None``. The old behavior was surprising and inconsistent with most of the documented examples of this function (which did not check the return value). * On Python 2.6, ``tornado.simple_httpclient`` only supports SSLv3. This is because Python 2.6 does not expose a way to support both SSLv3 and TLSv1 without also supporting the insecure SSLv2. * `tornado.websocket` no longer supports the older "draft 76" version of the websocket protocol by default, although this version can be enabled by overriding ``tornado.websocket.WebSocketHandler.allow_draft76``. ``tornado.httpclient`` ~~~~~~~~~~~~~~~~~~~~~~ * ``SimpleAsyncHTTPClient`` no longer hangs on ``HEAD`` requests, responses with no content, or empty ``POST``/``PUT`` response bodies. * ``SimpleAsyncHTTPClient`` now supports 303 and 307 redirect codes. * ``tornado.curl_httpclient`` now accepts non-integer timeouts. * ``tornado.curl_httpclient`` now supports basic authentication with an empty password. ``tornado.httpserver`` ~~~~~~~~~~~~~~~~~~~~~~ * `.HTTPServer` with ``xheaders=True`` will no longer accept ``X-Real-IP`` headers that don't look like valid IP addresses. * `.HTTPServer` now treats the ``Connection`` request header as case-insensitive. ``tornado.ioloop`` and ``tornado.iostream`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``IOStream.write`` now works correctly when given an empty string. * ``IOStream.read_until`` (and ``read_until_regex``) now perform better when there is a lot of buffered data, which improves performance of ``SimpleAsyncHTTPClient`` when downloading files with lots of chunks. * `.SSLIOStream` now works correctly when ``ssl_version`` is set to a value other than ``SSLv23``. * Idle ``IOLoops`` no longer wake up several times a second. * `tornado.ioloop.PeriodicCallback` no longer triggers duplicate callbacks when stopped and started repeatedly. 
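To illustrate the `~tornado.ioloop.PeriodicCallback` behavior described above, a minimal sketch (the callback name is hypothetical)::

    from tornado import ioloop

    def poll():
        print("polling")

    # The callback_time argument is in milliseconds.
    pc = ioloop.PeriodicCallback(poll, 500)
    pc.start()
    # Stopping and restarting no longer produces duplicate invocations.
    pc.stop()
    pc.start()
    ioloop.IOLoop.instance().start()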
``tornado.template`` ~~~~~~~~~~~~~~~~~~~~ * Exceptions in template code will now show better stack traces that reference lines from the original template file. * ``{#`` and ``#}`` can now be used for comments (and unlike the old ``{% comment %}`` directive, these can wrap other template directives). * Template directives may now span multiple lines. ``tornado.web`` ~~~~~~~~~~~~~~~ * Now behaves better when given malformed ``Cookie`` headers * `.RequestHandler.redirect` now has a ``status`` argument to send status codes other than 301 and 302. * New method `.RequestHandler.on_finish` may be overridden for post-request processing (as a counterpart to `.RequestHandler.prepare`) * `.StaticFileHandler` now outputs ``Content-Length`` and ``Etag`` headers on ``HEAD`` requests. * `.StaticFileHandler` now has overridable ``get_version`` and ``parse_url_path`` methods for use in subclasses. * `.RequestHandler.static_url` now takes an ``include_host`` parameter (in addition to the old support for the ``RequestHandler.include_host`` attribute). ``tornado.websocket`` ~~~~~~~~~~~~~~~~~~~~~ * Updated to support the latest version of the protocol, as finalized in RFC 6455. * Many bugs were fixed in all supported protocol versions. * `tornado.websocket` no longer supports the older "draft 76" version of the websocket protocol by default, although this version can be enabled by overriding ``tornado.websocket.WebSocketHandler.allow_draft76``. * `.WebSocketHandler.write_message` now accepts a ``binary`` argument to send binary messages. * Subprotocols (i.e. the ``Sec-WebSocket-Protocol`` header) are now supported; see the `.WebSocketHandler.select_subprotocol` method for details. * ``.WebSocketHandler.get_websocket_scheme`` can be used to select the appropriate url scheme (``ws://`` or ``wss://``) in cases where ``HTTPRequest.protocol`` is not set correctly. Other modules ~~~~~~~~~~~~~ * `tornado.auth.TwitterMixin.authenticate_redirect` now takes a ``callback_uri`` parameter. * `tornado.auth.TwitterMixin.twitter_request` now accepts both URLs and partial paths (complete URLs are useful for the search API which follows different patterns). * Exception handling in `tornado.gen` has been improved. It is now possible to catch exceptions thrown by a ``Task``. * `tornado.netutil.bind_sockets` now works when ``getaddrinfo`` returns duplicate addresses. * `tornado.platform.twisted` compatibility has been significantly improved. Twisted version 11.1.0 is now supported in addition to 11.0.0. * `tornado.process.fork_processes` correctly reseeds the `random` module even when `os.urandom` is not implemented. * `tornado.testing.main` supports a new flag ``--exception_on_interrupt``, which can be set to false to make ``Ctrl-C`` kill the process more reliably (at the expense of stack traces when it does so). * ``tornado.version_info`` is now a four-tuple so official releases can be distinguished from development branches. tornado-6.1.0/docs/releases/v2.2.1.rst000066400000000000000000000010751374705040500173070ustar00rootroot00000000000000What's new in Tornado 2.2.1 =========================== Apr 23, 2012 ------------ Security fixes ~~~~~~~~~~~~~~ * `tornado.web.RequestHandler.set_header` now properly sanitizes input values to protect against header injection, response splitting, etc. (it has always attempted to do this, but the check was incorrect). Note that redirects, the most likely source of such bugs, are protected by a separate check in `.RequestHandler.redirect`. 
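As an illustration of the class of input this check rejects, a sketch (the handler is hypothetical, and the assumption that unsafe values raise ``ValueError`` matches later Tornado versions)::

    import tornado.web

    class ProfileHandler(tornado.web.RequestHandler):
        def get(self):
            name = self.get_argument("name")
            # A value containing "\r\n" could otherwise be used for header
            # injection or response splitting; set_header now rejects it.
            self.set_header("X-User", name)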
Bug fixes ~~~~~~~~~ * Colored logging configuration in `tornado.options` is compatible with Python 3.2.3 (and 3.3). tornado-6.1.0/docs/releases/v2.3.0.rst000066400000000000000000000117251374705040500173120ustar00rootroot00000000000000What's new in Tornado 2.3 ========================= May 31, 2012 ------------ HTTP clients ~~~~~~~~~~~~ * `tornado.httpclient.HTTPClient` now supports the same constructor keyword arguments as `.AsyncHTTPClient`. * The ``max_clients`` keyword argument to `.AsyncHTTPClient.configure` now works. * ``tornado.simple_httpclient`` now supports the ``OPTIONS`` and ``PATCH`` HTTP methods. * ``tornado.simple_httpclient`` is better about closing its sockets instead of leaving them for garbage collection. * ``tornado.simple_httpclient`` correctly verifies SSL certificates for URLs containing IPv6 literals (This bug affected Python 2.5 and 2.6). * ``tornado.simple_httpclient`` no longer includes basic auth credentials in the ``Host`` header when those credentials are extracted from the URL. * ``tornado.simple_httpclient`` no longer modifies the caller-supplied header dictionary, which caused problems when following redirects. * ``tornado.curl_httpclient`` now supports client SSL certificates (using the same ``client_cert`` and ``client_key`` arguments as ``tornado.simple_httpclient``) HTTP Server ~~~~~~~~~~~ * `.HTTPServer` now works correctly with paths starting with ``//`` * ``HTTPHeaders.copy`` (inherited from `dict.copy`) now works correctly. * ``HTTPConnection.address`` is now always the socket address, even for non-IP sockets. ``HTTPRequest.remote_ip`` is still always an IP-style address (fake data is used for non-IP sockets) * Extra data at the end of multipart form bodies is now ignored, which fixes a compatibility problem with an iOS HTTP client library. ``IOLoop`` and ``IOStream`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `.IOStream` now has an ``error`` attribute that can be used to determine why a socket was closed. * ``tornado.iostream.IOStream.read_until`` and ``read_until_regex`` are much faster with large input. * ``IOStream.write`` performs better when given very large strings. * `.IOLoop.instance()` is now thread-safe. ``tornado.options`` ~~~~~~~~~~~~~~~~~~~ * `tornado.options` options with ``multiple=True`` that are set more than once now overwrite rather than append. This makes it possible to override values set in ``parse_config_file`` with ``parse_command_line``. * `tornado.options` ``--help`` output is now prettier. * `tornado.options.options` now supports attribute assignment. ``tornado.template`` ~~~~~~~~~~~~~~~~~~~~ * Template files containing non-ASCII (utf8) characters now work on Python 3 regardless of the locale environment variables. * Templates now support ``else`` clauses in ``try``/``except``/``finally``/``else`` blocks. ``tornado.web`` ~~~~~~~~~~~~~~~ * `tornado.web.RequestHandler` now supports the ``PATCH`` HTTP method. Note that this means any existing methods named ``patch`` in ``RequestHandler`` subclasses will need to be renamed. * `tornado.web.addslash` and ``removeslash`` decorators now send permanent redirects (301) instead of temporary (302). * `.RequestHandler.flush` now invokes its callback whether there was any data to flush or not. * Repeated calls to `.RequestHandler.set_cookie` with the same name now overwrite the previous cookie instead of producing additional copies. * ``tornado.web.OutputTransform.transform_first_chunk`` now takes and returns a status code in addition to the headers and chunk. 
This is a backwards-incompatible change to an interface that was never technically private, but was not included in the documentation and does not appear to have been used outside Tornado itself. * Fixed a bug on python versions before 2.6.5 when `tornado.web.URLSpec` regexes are constructed from unicode strings and keyword arguments are extracted. * The ``reverse_url`` function in the template namespace now comes from the `.RequestHandler` rather than the `.Application`. (Unless overridden, `.RequestHandler.reverse_url` is just an alias for the `.Application` method). * The ``Etag`` header is now returned on 304 responses to an ``If-None-Match`` request, improving compatibility with some caches. * `tornado.web` will no longer produce responses with status code 304 that also have entity headers such as ``Content-Length``. Other modules ~~~~~~~~~~~~~ * `tornado.auth.FacebookGraphMixin` no longer sends ``post_args`` redundantly in the url. * The ``extra_params`` argument to `tornado.escape.linkify` may now be a callable, to allow parameters to be chosen separately for each link. * `tornado.gen` no longer leaks ``StackContexts`` when a ``@gen.engine`` wrapped function is called repeatedly. * `tornado.locale.get_supported_locales` no longer takes a meaningless ``cls`` argument. * ``StackContext`` instances now have a deactivation callback that can be used to prevent further propagation. * `tornado.testing.AsyncTestCase.wait` now resets its timeout on each call. * ``tornado.wsgi.WSGIApplication`` now parses arguments correctly on Python 3. * Exception handling on Python 3 has been improved; previously some exceptions such as `UnicodeDecodeError` would generate ``TypeErrors``. tornado-6.1.0/docs/releases/v2.4.0.rst000066400000000000000000000055051374705040500173120ustar00rootroot00000000000000What's new in Tornado 2.4 ========================= Sep 4, 2012 ----------- General ~~~~~~~ * Fixed Python 3 bugs in `tornado.auth`, `tornado.locale`, and `tornado.wsgi`. HTTP clients ~~~~~~~~~~~~ * Removed ``max_simultaneous_connections`` argument from `tornado.httpclient` (both implementations). This argument hasn't been useful for some time (if you were using it you probably want ``max_clients`` instead) * ``tornado.simple_httpclient`` now accepts and ignores HTTP 1xx status responses. `tornado.ioloop` and `tornado.iostream` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Fixed a bug introduced in 2.3 that would cause `.IOStream` close callbacks to not run if there were pending reads. * Improved error handling in `.SSLIOStream` and SSL-enabled `.TCPServer`. * ``SSLIOStream.get_ssl_certificate`` now has a ``binary_form`` argument which is passed to ``SSLSocket.getpeercert``. * ``SSLIOStream.write`` can now be called while the connection is in progress, same as non-SSL `.IOStream` (but be careful not to send sensitive data until the connection has completed and the certificate has been verified). * `.IOLoop.add_handler` cannot be called more than once with the same file descriptor. This was always true for ``epoll``, but now the other implementations enforce it too. * On Windows, `.TCPServer` uses ``SO_EXCLUSIVEADDRUSE`` instead of ``SO_REUSEADDR``. `tornado.template` ~~~~~~~~~~~~~~~~~~ * ``{% break %}`` and ``{% continue %}`` can now be used in looping constructs in templates. * It is no longer an error for an if/else/for/etc block in a template to have an empty body. `tornado.testing` ~~~~~~~~~~~~~~~~~ * New class `tornado.testing.AsyncHTTPSTestCase` is like `.AsyncHTTPTestCase`,
but enables SSL for the testing server (by default using a self-signed testing certificate). * `tornado.testing.main` now accepts additional keyword arguments and forwards them to `unittest.main`. `tornado.web` ~~~~~~~~~~~~~ * New method `.RequestHandler.get_template_namespace` can be overridden to add additional variables without modifying keyword arguments to ``render_string``. * `.RequestHandler.add_header` now works with ``WSGIApplication``. * `.RequestHandler.get_secure_cookie` now handles a potential error case. * ``RequestHandler.__init__`` now calls ``super().__init__`` to ensure that all constructors are called when multiple inheritance is used. * Docs have been updated with a description of all available :py:attr:`Application settings <tornado.web.Application.settings>` Other modules ~~~~~~~~~~~~~ * `.OAuthMixin` now accepts ``"oob"`` as a ``callback_uri``. * `.OpenIdMixin` now also returns the ``claimed_id`` field for the user. * `tornado.platform.twisted` shutdown sequence is now more compatible. * The logging configuration used in `tornado.options` is now more tolerant of non-ascii byte strings. tornado-6.1.0/docs/releases/v2.4.1.rst000066400000000000000000000007231374705040500173100ustar00rootroot00000000000000What's new in Tornado 2.4.1 =========================== Nov 24, 2012 ------------ Bug fixes ~~~~~~~~~ * Fixed a memory leak in ``tornado.stack_context`` that was especially likely with long-running ``@gen.engine`` functions. * `tornado.auth.TwitterMixin` now works on Python 3. * Fixed a bug in which ``IOStream.read_until_close`` with a streaming callback would sometimes pass the last chunk of data to the final callback instead of the streaming callback. tornado-6.1.0/docs/releases/v3.0.0.rst000066400000000000000000000511451374705040500173100ustar00rootroot00000000000000What's new in Tornado 3.0 ========================= Mar 29, 2013 ------------ Highlights ^^^^^^^^^^ * The ``callback`` argument to many asynchronous methods is now optional, and these methods return a `.Future`. The `tornado.gen` module now understands ``Futures``, and these methods can be used directly without a ``gen.Task`` wrapper. * New function `.IOLoop.current` returns the `.IOLoop` that is running on the current thread (as opposed to `.IOLoop.instance`, which returns a specific thread's (usually the main thread's) IOLoop). * New class `tornado.netutil.Resolver` provides an asynchronous interface to DNS resolution. The default implementation is still blocking, but non-blocking implementations are available using one of three optional dependencies: `~tornado.netutil.ThreadedResolver` using the `concurrent.futures` thread pool, ``tornado.platform.caresresolver.CaresResolver`` using the ``pycares`` library, or ``tornado.platform.twisted.TwistedResolver`` using ``twisted`` * Tornado's logging is now less noisy, and it no longer goes directly to the root logger, allowing for finer-grained configuration. * New class `tornado.process.Subprocess` wraps `subprocess.Popen` with `.PipeIOStream` access to the child's file descriptors. * `.IOLoop` now has a static `configure <.Configurable.configure>` method like the one on `.AsyncHTTPClient`, which can be used to select an `.IOLoop` implementation other than the default. * `.IOLoop` can now optionally use a monotonic clock if available (see below for more details). Backwards-incompatible changes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Python 2.5 is no longer supported. Python 3 is now supported in a single codebase instead of using ``2to3`` * The ``tornado.database`` module has been removed.
It is now available as a separate package, `torndb <https://github.com/bdarnell/torndb>`_ * Functions that take an ``io_loop`` parameter now default to `.IOLoop.current()` instead of `.IOLoop.instance()`. * Empty HTTP request arguments are no longer ignored. This applies to ``HTTPRequest.arguments`` and ``RequestHandler.get_argument[s]`` in WSGI and non-WSGI modes. * On Python 3, `tornado.escape.json_encode` no longer accepts byte strings. * On Python 3, the ``get_authenticated_user`` methods in `tornado.auth` now return character strings instead of byte strings. * ``tornado.netutil.TCPServer`` has moved to its own module, `tornado.tcpserver`. * The Tornado test suite now requires ``unittest2`` when run on Python 2.6. * `tornado.options.options` is no longer a subclass of `dict`; attribute-style access is now required. Detailed changes by module ^^^^^^^^^^^^^^^^^^^^^^^^^^ Multiple modules ~~~~~~~~~~~~~~~~ * Tornado no longer logs to the root logger. Details on the new logging scheme can be found under the `tornado.log` module. Note that in some cases this will require that you add an explicit logging configuration in order to see any output (perhaps just calling ``logging.basicConfig()``), although both `.IOLoop.start()` and `tornado.options.parse_command_line` will do this for you. * On python 3.2+, methods that take an ``ssl_options`` argument (on `.SSLIOStream`, `.TCPServer`, and `.HTTPServer`) now accept either a dictionary of options or an `ssl.SSLContext` object. * New optional dependency on `concurrent.futures` to provide better support for working with threads. `concurrent.futures` is in the standard library for Python 3.2+, and can be installed on older versions with ``pip install futures``. `tornado.autoreload` ~~~~~~~~~~~~~~~~~~~~ * `tornado.autoreload` is now more reliable when there are errors at import time. * Calling `tornado.autoreload.start` (or creating an `.Application` with ``debug=True``) twice on the same `.IOLoop` now does nothing (instead of creating multiple periodic callbacks). Starting autoreload on more than one `.IOLoop` in the same process now logs a warning. * Scripts run by autoreload no longer inherit ``__future__`` imports used by Tornado. `tornado.auth` ~~~~~~~~~~~~~~ * On Python 3, the ``get_authenticated_user`` method family now returns character strings instead of byte strings. * Asynchronous methods defined in `tornado.auth` now return a `.Future`, and their ``callback`` argument is optional. The ``Future`` interface is preferred as it offers better error handling (the previous interface just logged a warning and returned None). * The `tornado.auth` mixin classes now define a method ``get_auth_http_client``, which can be overridden to use a non-default `.AsyncHTTPClient` instance (e.g. to use a different `.IOLoop`) * Subclasses of `.OAuthMixin` are encouraged to override `.OAuthMixin._oauth_get_user_future` instead of ``_oauth_get_user``, although both methods are still supported. `tornado.concurrent` ~~~~~~~~~~~~~~~~~~~~ * New module `tornado.concurrent` contains code to support working with `concurrent.futures`, or to emulate a future-based interface when that module is not available. ``tornado.curl_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Preliminary support for ``tornado.curl_httpclient`` on Python 3. The latest official release of pycurl only supports Python 2, but Ubuntu has a port available in 12.10 (``apt-get install python3-pycurl``). This port currently has bugs that prevent it from handling arbitrary binary data but it should work for textual (utf8) resources.
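A sketch of opting into the curl-based client discussed in this section::

    from tornado.httpclient import AsyncHTTPClient

    # Applies to all AsyncHTTPClient instances created afterwards
    # in this process.
    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    client = AsyncHTTPClient()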
* Fix a crash with libcurl 7.29.0 if a curl object is created and closed without being used. `tornado.escape` ~~~~~~~~~~~~~~~~ * On Python 3, `~tornado.escape.json_encode` no longer accepts byte strings. This mirrors the behavior of the underlying json module. Python 2 behavior is unchanged but should be faster. `tornado.gen` ~~~~~~~~~~~~~ * New decorator ``@gen.coroutine`` is available as an alternative to ``@gen.engine``. It automatically returns a `.Future`, and within the function instead of calling a callback you return a value with ``raise gen.Return(value)`` (or simply ``return value`` in Python 3.3). * Generators may now yield `.Future` objects. * Callbacks produced by ``gen.Callback`` and ``gen.Task`` are now automatically stack-context-wrapped, to minimize the risk of context leaks when used with asynchronous functions that don't do their own wrapping. * Fixed a memory leak involving generators, `.RequestHandler.flush`, and clients closing connections while output is being written. * Yielding a large list no longer has quadratic performance. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ * `.AsyncHTTPClient.fetch` now returns a `.Future` and its callback argument is optional. When the future interface is used, any error will be raised automatically, as if `.HTTPResponse.rethrow` was called. * `.AsyncHTTPClient.configure` and all `.AsyncHTTPClient` constructors now take a ``defaults`` keyword argument. This argument should be a dictionary, and its values will be used in place of corresponding attributes of `~tornado.httpclient.HTTPRequest` that are not set. * All unset attributes of `tornado.httpclient.HTTPRequest` are now ``None``. The default values of some attributes (``connect_timeout``, ``request_timeout``, ``follow_redirects``, ``max_redirects``, ``use_gzip``, ``proxy_password``, ``allow_nonstandard_methods``, and ``validate_cert``) have been moved from `~tornado.httpclient.HTTPRequest` to the client implementations. * The ``max_clients`` argument to `.AsyncHTTPClient` is now a keyword-only argument. * Keyword arguments to `.AsyncHTTPClient.configure` are no longer used when instantiating an implementation subclass directly. * Secondary `.AsyncHTTPClient` callbacks (``streaming_callback``, ``header_callback``, and ``prepare_curl_callback``) now respect ``StackContext``. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ * `.HTTPServer` no longer logs an error when it is unable to read a second request from an HTTP 1.1 keep-alive connection. * `.HTTPServer` now takes a ``protocol`` keyword argument which can be set to ``https`` if the server is behind an SSL-decoding proxy that does not set any supported X-headers. * ``tornado.httpserver.HTTPConnection`` now has a ``set_close_callback`` method that should be used instead of reaching into its ``stream`` attribute. * Empty HTTP request arguments are no longer ignored. This applies to ``HTTPRequest.arguments`` and ``RequestHandler.get_argument[s]`` in WSGI and non-WSGI modes. `tornado.ioloop` ~~~~~~~~~~~~~~~~ * New function `.IOLoop.current` returns the ``IOLoop`` that is running on the current thread (as opposed to `.IOLoop.instance`, which returns a specific thread's (usually the main thread's) IOLoop). * New method `.IOLoop.add_future` to run a callback on the IOLoop when an asynchronous `.Future` finishes. * `.IOLoop` now has a static `configure <.Configurable.configure>` method like the one on `.AsyncHTTPClient`, which can be used to select an `.IOLoop` implementation other than the default.
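For example, a sketch of selecting an implementation with the new ``configure`` method (the class path assumes the select-based loop lives at ``tornado.platform.select.SelectIOLoop``)::

    from tornado.ioloop import IOLoop

    # Must run before the first IOLoop is instantiated.
    IOLoop.configure("tornado.platform.select.SelectIOLoop")
    IOLoop.instance().start()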
* The `.IOLoop` poller implementations (``select``, ``epoll``, ``kqueue``) are now available as distinct subclasses of `.IOLoop`. Instantiating `.IOLoop` will continue to automatically choose the best available implementation. * The `.IOLoop` constructor has a new keyword argument ``time_func``, which can be used to set the time function used when scheduling callbacks. This is most useful with the `time.monotonic` function, introduced in Python 3.3 and backported to older versions via the ``monotime`` module. Using a monotonic clock here avoids problems when the system clock is changed. * New function `.IOLoop.time` returns the current time according to the IOLoop. To use the new monotonic clock functionality, all calls to `.IOLoop.add_timeout` must either pass a `datetime.timedelta` or a time relative to `.IOLoop.time`, not `time.time`. (`time.time` will continue to work only as long as the IOLoop's ``time_func`` argument is not used). * New convenience method `.IOLoop.run_sync` can be used to start an IOLoop just long enough to run a single coroutine. * New method `.IOLoop.add_callback_from_signal` is safe to use in a signal handler (the regular `.add_callback` method may deadlock). * `.IOLoop` now uses `signal.set_wakeup_fd` where available (Python 2.6+ on Unix) to avoid a race condition that could result in Python signal handlers being delayed. * Method ``IOLoop.running()`` has been removed. * `.IOLoop` has been refactored to better support subclassing. * `.IOLoop.add_callback` and `.add_callback_from_signal` now take ``*args, **kwargs`` to pass along to the callback. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ * `.IOStream.connect` now has an optional ``server_hostname`` argument which will be used for SSL certificate validation when applicable. Additionally, when supported (on Python 3.2+), this hostname will be sent via SNI (and this is supported by ``tornado.simple_httpclient``) * Much of `.IOStream` has been refactored into a separate class `.BaseIOStream`. * New class `tornado.iostream.PipeIOStream` provides the IOStream interface on pipe file descriptors. * `.IOStream` now raises a new exception ``tornado.iostream.StreamClosedError`` when you attempt to read or write after the stream has been closed (by either side). * `.IOStream` now simply closes the connection when it gets an ``ECONNRESET`` error, rather than logging it as an error. * ``IOStream.error`` no longer picks up unrelated exceptions. * `.BaseIOStream.close` now has an ``exc_info`` argument (similar to the one used in the `logging` module) that can be used to set the stream's ``error`` attribute when closing it. * `.BaseIOStream.read_until_close` now works correctly when it is called while there is buffered data. * Fixed a major performance regression when run on PyPy (introduced in Tornado 2.3). `tornado.log` ~~~~~~~~~~~~~ * New module containing `.enable_pretty_logging` and `.LogFormatter`, moved from the options module. * `.LogFormatter` now handles non-ascii data in messages and tracebacks better. `tornado.netutil` ~~~~~~~~~~~~~~~~~ * New class `tornado.netutil.Resolver` provides an asynchronous interface to DNS resolution.
The default implementation is still blocking, but non-blocking implementations are available using one of three optional dependencies: `~tornado.netutil.ThreadedResolver` using the `concurrent.futures` thread pool, `tornado.platform.caresresolver.CaresResolver` using the ``pycares`` library, or `tornado.platform.twisted.TwistedResolver` using ``twisted`` * New function `tornado.netutil.is_valid_ip` returns true if a given string is a valid IP (v4 or v6) address. * `tornado.netutil.bind_sockets` has a new ``flags`` argument that can be used to pass additional flags to ``getaddrinfo``. * `tornado.netutil.bind_sockets` no longer sets ``AI_ADDRCONFIG``; this will cause it to bind to both ipv4 and ipv6 more often than before. * `tornado.netutil.bind_sockets` now works when Python was compiled with ``--disable-ipv6`` but IPv6 DNS resolution is available on the system. * ``tornado.netutil.TCPServer`` has moved to its own module, `tornado.tcpserver`. `tornado.options` ~~~~~~~~~~~~~~~~~ * The class underlying the functions in `tornado.options` is now public (`tornado.options.OptionParser`). This can be used to create multiple independent option sets, such as for subcommands. * `tornado.options.parse_config_file` now configures logging automatically by default, in the same way that `~tornado.options.parse_command_line` does. * New function `tornado.options.add_parse_callback` schedules a callback to be run after the command line or config file has been parsed. The keyword argument ``final=False`` can be used on either parsing function to suppress these callbacks. * `tornado.options.define` now takes a ``callback`` argument. This callback will be run with the new value whenever the option is changed. This is especially useful for options that set other options, such as by reading from a config file. * `tornado.options.parse_command_line` ``--help`` output now goes to ``stderr`` rather than ``stdout``. * `tornado.options.options` is no longer a subclass of `dict`; attribute-style access is now required. * `tornado.options.options` (and `.OptionParser` instances generally) now have a `.mockable()` method that returns a wrapper object compatible with ``mock.patch``. * Function ``tornado.options.enable_pretty_logging`` has been moved to the `tornado.log` module. `tornado.platform.caresresolver` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * New module containing an asynchronous implementation of the `.Resolver` interface, using the ``pycares`` library. `tornado.platform.twisted` ~~~~~~~~~~~~~~~~~~~~~~~~~~ * New class ``tornado.platform.twisted.TwistedIOLoop`` allows Tornado code to be run on the Twisted reactor (as opposed to the existing ``TornadoReactor``, which bridges the gap in the other direction). * New class `tornado.platform.twisted.TwistedResolver` is an asynchronous implementation of the `.Resolver` interface. `tornado.process` ~~~~~~~~~~~~~~~~~ * New class `tornado.process.Subprocess` wraps `subprocess.Popen` with `.PipeIOStream` access to the child's file descriptors. ``tornado.simple_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``SimpleAsyncHTTPClient`` now takes a ``resolver`` keyword argument (which may be passed to either the constructor or `configure <.Configurable.configure>`), to allow it to use the new non-blocking `tornado.netutil.Resolver`. * When following redirects, ``SimpleAsyncHTTPClient`` now treats a 302 response code the same as a 303. This is contrary to the HTTP spec but consistent with all browsers and other major HTTP clients (including ``CurlAsyncHTTPClient``).
* The behavior of ``header_callback`` with ``SimpleAsyncHTTPClient`` has changed and is now the same as that of ``CurlAsyncHTTPClient``. The header callback now receives the first line of the response (e.g. ``HTTP/1.0 200 OK``) and the final empty line. * ``tornado.simple_httpclient`` now accepts responses with a 304 status code that include a ``Content-Length`` header. * Fixed a bug in which ``SimpleAsyncHTTPClient`` callbacks were being run in the client's ``stack_context``. ``tornado.stack_context`` ~~~~~~~~~~~~~~~~~~~~~~~~~ * ``stack_context.wrap`` now runs the wrapped callback in a more consistent environment by recreating contexts even if they already exist on the stack. * Fixed a bug in which stack contexts could leak from one callback chain to another. * Yield statements inside a ``with`` statement can cause stack contexts to become inconsistent; an exception will now be raised when this case is detected. `tornado.template` ~~~~~~~~~~~~~~~~~~ * Errors while rendering templates no longer log the generated code, since the enhanced stack traces (from version 2.1) should make this unnecessary. * The ``{% apply %}`` directive now works properly with functions that return both unicode strings and byte strings (previously only byte strings were supported). * Code in templates is no longer affected by Tornado's ``__future__`` imports (which previously included ``absolute_import`` and ``division``). `tornado.testing` ~~~~~~~~~~~~~~~~~ * New function `tornado.testing.bind_unused_port` both chooses a port and binds a socket to it, so there is no risk of another process using the same port. ``get_unused_port`` is now deprecated. * New decorator `tornado.testing.gen_test` can be used to allow for yielding `tornado.gen` objects in tests, as an alternative to the ``stop`` and ``wait`` methods of `.AsyncTestCase`. * `tornado.testing.AsyncTestCase` and friends now extend ``unittest2.TestCase`` when it is available (and continue to use the standard ``unittest`` module when ``unittest2`` is not available) * `tornado.testing.ExpectLog` can be used as a finer-grained alternative to ``tornado.testing.LogTrapTestCase`` * The command-line interface to `tornado.testing.main` now supports additional arguments from the underlying `unittest` module: ``verbose``, ``quiet``, ``failfast``, ``catch``, ``buffer``. * The deprecated ``--autoreload`` option of `tornado.testing.main` has been removed. Use ``python -m tornado.autoreload`` as a prefix command instead. * The ``--httpclient`` option of `tornado.testing.main` has been moved to ``tornado.test.runtests`` so as not to pollute the application option namespace. The `tornado.options` module's new callback support now makes it easy to add options from a wrapper script instead of putting all possible options in `tornado.testing.main`. * `.AsyncHTTPTestCase` no longer calls `.AsyncHTTPClient.close` for tests that use the singleton `.IOLoop.instance`. * ``LogTrapTestCase`` no longer fails when run in unknown logging configurations. This allows tests to be run under nose, which does its own log buffering (``LogTrapTestCase`` doesn't do anything useful in this case, but at least it doesn't break things any more). ``tornado.util`` ~~~~~~~~~~~~~~~~ * ``tornado.util.b`` (which was only intended for internal use) is gone. `tornado.web` ~~~~~~~~~~~~~ * `.RequestHandler.set_header` now overwrites previous header values case-insensitively. 
* `tornado.web.RequestHandler` has new attributes ``path_args`` and ``path_kwargs``, which contain the positional and keyword arguments that are passed to the ``get``/``post``/etc method. These attributes are set before those methods are called, so they are available during ``prepare()`` * `tornado.web.ErrorHandler` no longer requires XSRF tokens on ``POST`` requests, so posts to an unknown url will always return 404 instead of complaining about XSRF tokens. * Several methods related to HTTP status codes now take a ``reason`` keyword argument to specify an alternate "reason" string (i.e. the "Not Found" in "HTTP/1.1 404 Not Found"). It is now possible to set status codes other than those defined in the spec, as long as a reason string is given. * The ``Date`` HTTP header is now set by default on all responses. * ``Etag``/``If-None-Match`` requests now work with `.StaticFileHandler`. * `.StaticFileHandler` no longer sets ``Cache-Control: public`` unnecessarily. * When gzip is enabled in a `tornado.web.Application`, appropriate ``Vary: Accept-Encoding`` headers are now sent. * It is no longer necessary to pass all handlers for a host in a single `.Application.add_handlers` call. Now the request will be matched against the handlers for any ``host_pattern`` that includes the request's ``Host`` header. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * Client-side WebSocket support is now available: `tornado.websocket.websocket_connect` * `.WebSocketHandler` has new methods `~.WebSocketHandler.ping` and `~.WebSocketHandler.on_pong` to send pings to the browser (not supported on the ``draft76`` protocol) tornado-6.1.0/docs/releases/v3.0.1.rst000066400000000000000000000017731374705040500173130ustar00rootroot00000000000000What's new in Tornado 3.0.1 =========================== Apr 8, 2013 ----------- * The interface of `tornado.auth.FacebookGraphMixin` is now consistent with its documentation and the rest of the module. The ``get_authenticated_user`` and ``facebook_request`` methods return a ``Future`` and the ``callback`` argument is optional. * The `tornado.testing.gen_test` decorator will no longer be recognized as a (broken) test by ``nose``. * Work around a bug in Ubuntu 13.04 betas involving an incomplete backport of the `ssl.match_hostname` function. * `tornado.websocket.websocket_connect` now fails cleanly when it attempts to connect to a non-websocket url. * ``tornado.testing.LogTrapTestCase`` once again works with byte strings on Python 2. * The ``request`` attribute of `tornado.httpclient.HTTPResponse` is now always an `~tornado.httpclient.HTTPRequest`, never a ``_RequestProxy``. * Exceptions raised by the `tornado.gen` module now have better messages when tuples are used as callback keys. tornado-6.1.0/docs/releases/v3.0.2.rst000066400000000000000000000006411374705040500173050ustar00rootroot00000000000000What's new in Tornado 3.0.2 =========================== Jun 2, 2013 ----------- * `tornado.auth.TwitterMixin` now defaults to version 1.1 of the Twitter API, instead of version 1.0 which is being `discontinued on June 11 `_. It also now uses HTTPS when talking to Twitter. * Fixed a potential memory leak with a long chain of `.gen.coroutine` or ``gen.engine`` functions. 
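  The affected pattern is a deep chain of coroutines, each waiting on the
  next (a hypothetical sketch for illustration, not code from the fix)::

      from tornado import gen

      @gen.coroutine
      def step(n):
          # Each level of the chain yields the Future of the next level.
          if n > 0:
              yield step(n - 1)

      @gen.coroutine
      def main():
          yield step(10000)  # long chains like this previously leaked memory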
tornado-6.1.0/docs/releases/v3.1.0.rst000066400000000000000000000246071374705040500173140ustar00rootroot00000000000000What's new in Tornado 3.1 ========================= Jun 15, 2013 ------------ Multiple modules ~~~~~~~~~~~~~~~~ * Many reference cycles have been broken up throughout the package, allowing for more efficient garbage collection on CPython. * Silenced some log messages when connections are opened and immediately closed (i.e. port scans), or other situations related to closed connections. * Various small speedups: `.HTTPHeaders` case normalization, `.UIModule` proxy objects, precompile some regexes. `tornado.auth` ~~~~~~~~~~~~~~ * `~tornado.auth.OAuthMixin` always sends ``oauth_version=1.0`` in its request as required by the spec. * `~tornado.auth.FacebookGraphMixin` now uses ``self._FACEBOOK_BASE_URL`` in `~.FacebookGraphMixin.facebook_request` to allow the base url to be overridden. * The ``authenticate_redirect`` and ``authorize_redirect`` methods in the `tornado.auth` mixin classes all now return Futures. These methods are asynchronous in `.OAuthMixin` and derived classes, although they do not take a callback. The `.Future` these methods return must be yielded if they are called from a function decorated with `.gen.coroutine` (but not ``gen.engine``). * `.TwitterMixin` now uses ``/account/verify_credentials`` to get information about the logged-in user, which is more robust against changing screen names. * The ``demos`` directory (in the source distribution) has a new ``twitter`` demo using `.TwitterMixin`. `tornado.escape` ~~~~~~~~~~~~~~~~ * `.url_escape` and `.url_unescape` have a new ``plus`` argument (defaulting to True for consistency with the previous behavior) which specifies whether they work like `urllib.parse.unquote` or `urllib.parse.unquote_plus`. `tornado.gen` ~~~~~~~~~~~~~ * Fixed a potential memory leak with long chains of `tornado.gen` coroutines. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ * `tornado.httpclient.HTTPRequest` takes a new argument ``auth_mode``, which can be either ``basic`` or ``digest``. Digest authentication is only supported with ``tornado.curl_httpclient``. * ``tornado.curl_httpclient`` no longer goes into an infinite loop when pycurl returns a negative timeout. * ``curl_httpclient`` now supports the ``PATCH`` and ``OPTIONS`` methods without the use of ``allow_nonstandard_methods=True``. * Worked around a class of bugs in libcurl that would result in errors from `.IOLoop.update_handler` in various scenarios including digest authentication and socks proxies. * The ``TCP_NODELAY`` flag is now set when appropriate in ``simple_httpclient``. * ``simple_httpclient`` no longer logs exceptions, since those exceptions are made available to the caller as ``HTTPResponse.error``. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ * `tornado.httpserver.HTTPServer` handles malformed HTTP headers more gracefully. * `.HTTPServer` now supports lists of IPs in ``X-Forwarded-For`` (it chooses the last, i.e. nearest one). * Memory is now reclaimed promptly on CPython when an HTTP request fails because it exceeded the maximum upload size. * The ``TCP_NODELAY`` flag is now set when appropriate in `.HTTPServer`. * The `.HTTPServer` ``no_keep_alive`` option is now respected with HTTP 1.0 connections that explicitly pass ``Connection: keep-alive``. * The ``Connection: keep-alive`` check for HTTP 1.0 connections is now case-insensitive. 
* The `str` and `repr` of ``tornado.httpserver.HTTPRequest`` no longer include the request body, reducing log spam on errors (and potential exposure/retention of private data).

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

* The cache used in `.HTTPHeaders` will no longer grow without bound.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* Some `.IOLoop` implementations (such as ``pyzmq``) accept objects other than integer file descriptors; these objects will now have their ``close()`` method called when the `.IOLoop` is closed with ``all_fds=True``.
* The stub handles left behind by `.IOLoop.remove_timeout` will now get cleaned up instead of waiting to expire.

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

* Fixed a bug in `.BaseIOStream.read_until_close` that would sometimes cause data to be passed to the final callback instead of the streaming callback.
* The `.IOStream` close callback is now run more reliably if there is an exception in ``_try_inline_read``.
* New method `.BaseIOStream.set_nodelay` can be used to set the ``TCP_NODELAY`` flag.
* Fixed a case where errors in ``SSLIOStream.connect`` (and ``SimpleAsyncHTTPClient``) were not being reported correctly.

`tornado.locale`
~~~~~~~~~~~~~~~~

* `.Locale.format_date` now works on Python 3.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

* The default `.Resolver` implementation now works on Solaris.
* `.Resolver` now has a `~.Resolver.close` method.
* Fixed a potential CPU DoS when ``tornado.netutil.ssl_match_hostname`` is used on certificates with an abusive wildcard pattern.
* All instances of `.ThreadedResolver` now share a single thread pool, whose size is set by the first one to be created (or the static ``Resolver.configure`` method).
* `.ExecutorResolver` is now documented for public use.
* `.bind_sockets` now works in configurations with incomplete IPv6 support.

`tornado.options`
~~~~~~~~~~~~~~~~~

* `tornado.options.define` with ``multiple=True`` now works on Python 3.
* `tornado.options.options` and other `.OptionParser` instances support some new dict-like methods: `~.OptionParser.items()`, iteration over keys, and (read-only) access to options with square bracket syntax. `.OptionParser.group_dict` returns all options with a given group name, and `.OptionParser.as_dict` returns all options.

`tornado.process`
~~~~~~~~~~~~~~~~~

* `tornado.process.Subprocess` no longer leaks file descriptors into the child process, which fixes a problem in which the child could not detect that the parent process had closed its stdin pipe.
* `.Subprocess.set_exit_callback` now works for subprocesses created without an explicit ``io_loop`` parameter.

``tornado.stack_context``
~~~~~~~~~~~~~~~~~~~~~~~~~

* ``tornado.stack_context`` has been rewritten and is now much faster.
* New function ``run_with_stack_context`` facilitates the use of stack contexts with coroutines.

`tornado.tcpserver`
~~~~~~~~~~~~~~~~~~~

* The constructors of `.TCPServer` and `.HTTPServer` now take a ``max_buffer_size`` keyword argument.

`tornado.template`
~~~~~~~~~~~~~~~~~~

* Some internal names used by the template system have been changed; now all "reserved" names in templates start with ``_tt_``.

`tornado.testing`
~~~~~~~~~~~~~~~~~

* `tornado.testing.AsyncTestCase.wait` now raises the correct exception when it has been modified by ``tornado.stack_context``.
* `tornado.testing.gen_test` can now be called as ``@gen_test(timeout=60)`` to give some tests a longer timeout than others.
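  For instance, a test that legitimately needs more than the default five
  seconds might look like this (a minimal sketch; the test body and timings
  are illustrative)::

      from tornado import gen
      from tornado.testing import AsyncTestCase, gen_test

      class SlowTest(AsyncTestCase):
          @gen_test(timeout=60)
          def test_slow_operation(self):
              # Simulate a long-running asynchronous operation.
              yield gen.Task(self.io_loop.add_timeout,
                             self.io_loop.time() + 10)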
* The environment variable ``ASYNC_TEST_TIMEOUT`` can now be set to override the default timeout for `.AsyncTestCase.wait` and `.gen_test`. * `.bind_unused_port` now passes ``None`` instead of ``0`` as the port to ``getaddrinfo``, which works better with some unusual network configurations. `tornado.util` ~~~~~~~~~~~~~~ * `tornado.util.import_object` now works with top-level module names that do not contain a dot. * `tornado.util.import_object` now consistently raises `ImportError` instead of `AttributeError` when it fails. `tornado.web` ~~~~~~~~~~~~~ * The ``handlers`` list passed to the `tornado.web.Application` constructor and `~tornado.web.Application.add_handlers` methods can now contain lists in addition to tuples and `~tornado.web.URLSpec` objects. * `tornado.web.StaticFileHandler` now works on Windows when the client passes an ``If-Modified-Since`` timestamp before 1970. * New method `.RequestHandler.log_exception` can be overridden to customize the logging behavior when an exception is uncaught. Most apps that currently override ``_handle_request_exception`` can now use a combination of `.RequestHandler.log_exception` and `.write_error`. * `.RequestHandler.get_argument` now raises `.MissingArgumentError` (a subclass of `tornado.web.HTTPError`, which is what it raised previously) if the argument cannot be found. * `.Application.reverse_url` now uses `.url_escape` with ``plus=False``, i.e. spaces are encoded as ``%20`` instead of ``+``. * Arguments extracted from the url path are now decoded with `.url_unescape` with ``plus=False``, so plus signs are left as-is instead of being turned into spaces. * `.RequestHandler.send_error` will now only be called once per request, even if multiple exceptions are caught by the stack context. * The ``tornado.web.asynchronous`` decorator is no longer necessary for methods that return a `.Future` (i.e. those that use the `.gen.coroutine` or ``return_future`` decorators) * `.RequestHandler.prepare` may now be asynchronous if it returns a `.Future`. The ``tornado.web.asynchronous`` decorator is not used with ``prepare``; one of the `.Future`-related decorators should be used instead. * ``RequestHandler.current_user`` may now be assigned to normally. * `.RequestHandler.redirect` no longer silently strips control characters and whitespace. It is now an error to pass control characters, newlines or tabs. * `.StaticFileHandler` has been reorganized internally and now has additional extension points that can be overridden in subclasses. * `.StaticFileHandler` now supports HTTP ``Range`` requests. `.StaticFileHandler` is still not suitable for files too large to comfortably fit in memory, but ``Range`` support is necessary in some browsers to enable seeking of HTML5 audio and video. * `.StaticFileHandler` now uses longer hashes by default, and uses the same hashes for ``Etag`` as it does for versioned urls. * `.StaticFileHandler.make_static_url` and `.RequestHandler.static_url` now have an additional keyword argument ``include_version`` to suppress the url versioning. * `.StaticFileHandler` now reads its file in chunks, which will reduce memory fragmentation. * Fixed a problem with the ``Date`` header and cookie expiration dates when the system locale is set to a non-english configuration. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * `.WebSocketHandler` now catches `.StreamClosedError` and runs `~.WebSocketHandler.on_close` immediately instead of logging a stack trace. * New method `.WebSocketHandler.set_nodelay` can be used to set the ``TCP_NODELAY`` flag. 
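  For example, a latency-sensitive handler can disable Nagle's algorithm as
  soon as the connection opens (a minimal sketch; the handler name and echo
  behavior are illustrative)::

      import tornado.websocket

      class EchoHandler(tornado.websocket.WebSocketHandler):
          def open(self):
              # Favor low latency over fewer packets for small messages.
              self.set_nodelay(True)

          def on_message(self, message):
              self.write_message(message)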
`tornado.wsgi`
~~~~~~~~~~~~~~

* Fixed an exception in `.WSGIContainer` when the connection is closed while output is being written.

tornado-6.1.0/docs/releases/v3.1.1.rst000066400000000000000000000005341374705040500173060ustar00rootroot00000000000000What's new in Tornado 3.1.1
===========================

Sep 1, 2013
-----------

* `.StaticFileHandler` no longer fails if the client requests a ``Range`` that is larger than the entire file (Facebook has a crawler that does this).
* `.RequestHandler.on_connection_close` now works correctly on subsequent requests of a keep-alive connection.

tornado-6.1.0/docs/releases/v3.2.0.rst000066400000000000000000000161431374705040500173110ustar00rootroot00000000000000What's new in Tornado 3.2
=========================

Jan 14, 2014
------------

Installation
~~~~~~~~~~~~

* Tornado now depends on the ``backports.ssl_match_hostname`` package when running on Python 2. This will be installed automatically when using ``pip`` or ``easy_install``.
* Tornado now includes an optional C extension module, which greatly improves performance of websockets. This extension will be built automatically if a C compiler is found at install time.

New modules
~~~~~~~~~~~

* The `tornado.platform.asyncio` module provides integration with the ``asyncio`` module introduced in Python 3.4 (also available for Python 3.3 with ``pip install asyncio``).

`tornado.auth`
~~~~~~~~~~~~~~

* Added `.GoogleOAuth2Mixin`, supporting authentication to Google services with OAuth 2 instead of OpenID and OAuth 1.
* `.FacebookGraphMixin` has been updated to use the current Facebook login URL, which saves a redirect.

`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

* ``TracebackFuture`` now accepts a ``timeout`` keyword argument (although it is still incorrect to use a non-zero timeout in non-blocking code).

``tornado.curl_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``tornado.curl_httpclient`` now works on Python 3 with the soon-to-be-released pycurl 7.19.3, which will officially support Python 3 for the first time. Note that there are some unofficial Python 3 ports of pycurl (Ubuntu has included one for its past several releases); these are not supported for use with Tornado.

`tornado.escape`
~~~~~~~~~~~~~~~~

* `.xhtml_escape` now escapes apostrophes as well.
* `tornado.escape.utf8`, `.to_unicode`, and `.native_str` now raise `TypeError` instead of `AssertionError` when given an invalid value.

`tornado.gen`
~~~~~~~~~~~~~

* Coroutines may now yield dicts in addition to lists to wait for multiple tasks in parallel.
* Improved performance of `tornado.gen` when yielding a `.Future` that is already done.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* `tornado.httpclient.HTTPRequest` now uses property setters so that setting attributes after construction applies the same conversions as ``__init__`` (e.g. converting the body attribute to bytes).

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* Malformed ``x-www-form-urlencoded`` request bodies will now log a warning and continue instead of causing the request to fail (similar to the existing handling of malformed ``multipart/form-data`` bodies). This is done mainly because some libraries send this content type by default even when the data is not form-encoded.
* Fixed some error messages for unix sockets (and other non-IP sockets).

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* `.IOLoop` now uses ``IOLoop.handle_callback_exception`` consistently for error logging.
* `.IOLoop` now frees callback objects earlier, reducing memory usage while idle.
* `.IOLoop` will no longer call `logging.basicConfig` if there is a handler defined for the root logger or for the ``tornado`` or ``tornado.application`` loggers (previously it only looked at the root logger). `tornado.iostream` ~~~~~~~~~~~~~~~~~~ * `.IOStream` now recognizes ``ECONNABORTED`` error codes in more places (which was mainly an issue on Windows). * `.IOStream` now frees memory earlier if a connection is closed while there is data in the write buffer. * `.PipeIOStream` now handles ``EAGAIN`` error codes correctly. * `.SSLIOStream` now initiates the SSL handshake automatically without waiting for the application to try and read or write to the connection. * Swallow a spurious exception from ``set_nodelay`` when a connection has been reset. `tornado.locale` ~~~~~~~~~~~~~~~~ * `.Locale.format_date` no longer forces the use of absolute dates in Russian. `tornado.log` ~~~~~~~~~~~~~ * Fix an error from `tornado.log.enable_pretty_logging` when `sys.stderr` does not have an ``isatty`` method. * `tornado.log.LogFormatter` now accepts keyword arguments ``fmt`` and ``datefmt``. `tornado.netutil` ~~~~~~~~~~~~~~~~~ * `.is_valid_ip` (and therefore ``HTTPRequest.remote_ip``) now rejects empty strings. * Synchronously using `.ThreadedResolver` at import time to resolve a unicode hostname no longer deadlocks. `tornado.platform.twisted` ~~~~~~~~~~~~~~~~~~~~~~~~~~ * `.TwistedResolver` now has better error handling. `tornado.process` ~~~~~~~~~~~~~~~~~ * `.Subprocess` no longer leaks file descriptors if `subprocess.Popen` fails. ``tornado.simple_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``simple_httpclient`` now applies the ``connect_timeout`` to requests that are queued and have not yet started. * On Python 2.6, ``simple_httpclient`` now uses TLSv1 instead of SSLv3. * ``simple_httpclient`` now enforces the connect timeout during DNS resolution. * The embedded ``ca-certificates.crt`` file has been updated with the current Mozilla CA list. `tornado.web` ~~~~~~~~~~~~~ * `.StaticFileHandler` no longer fails if the client requests a ``Range`` that is larger than the entire file (Facebook has a crawler that does this). * `.RequestHandler.on_connection_close` now works correctly on subsequent requests of a keep-alive connection. * New application setting ``default_handler_class`` can be used to easily set up custom 404 pages. * New application settings ``autoreload``, ``compiled_template_cache``, ``static_hash_cache``, and ``serve_traceback`` can be used to control individual aspects of debug mode. * New methods `.RequestHandler.get_query_argument` and `.RequestHandler.get_body_argument` and new attributes ``HTTPRequest.query_arguments`` and ``HTTPRequest.body_arguments`` allow access to arguments without intermingling those from the query string with those from the request body. * `.RequestHandler.decode_argument` and related methods now raise an ``HTTPError(400)`` instead of `UnicodeDecodeError` when the argument could not be decoded. * `.RequestHandler.clear_all_cookies` now accepts ``domain`` and ``path`` arguments, just like `~.RequestHandler.clear_cookie`. * It is now possible to specify handlers by name when using the `tornado.web.URLSpec` class. * `.Application` now accepts 4-tuples to specify the ``name`` parameter (which previously required constructing a `tornado.web.URLSpec` object instead of a tuple). * Fixed an incorrect error message when handler methods return a value other than None or a Future. 
* Exceptions will no longer be logged twice when using both ``@asynchronous`` and ``@gen.coroutine`` `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * `.WebSocketHandler.write_message` now raises `.WebSocketClosedError` instead of `AttributeError` when the connection has been closed. * `.websocket_connect` now accepts preconstructed ``HTTPRequest`` objects. * Fix a bug with `.WebSocketHandler` when used with some proxies that unconditionally modify the ``Connection`` header. * `.websocket_connect` now returns an error immediately for refused connections instead of waiting for the timeout. * `.WebSocketClientConnection` now has a ``close`` method. `tornado.wsgi` ~~~~~~~~~~~~~~ * `.WSGIContainer` now calls the iterable's ``close()`` method even if an error is raised, in compliance with the spec. tornado-6.1.0/docs/releases/v3.2.1.rst000066400000000000000000000032011374705040500173010ustar00rootroot00000000000000What's new in Tornado 3.2.1 =========================== May 5, 2014 ----------- Security fixes ~~~~~~~~~~~~~~ * The signed-value format used by `.RequestHandler.set_secure_cookie` and `.RequestHandler.get_secure_cookie` has changed to be more secure. **This is a disruptive change**. The ``secure_cookie`` functions take new ``version`` parameters to support transitions between cookie formats. * The new cookie format fixes a vulnerability that may be present in applications that use multiple cookies where the name of one cookie is a prefix of the name of another. * To minimize disruption, cookies in the older format will be accepted by default until they expire. Applications that may be vulnerable can reject all cookies in the older format by passing ``min_version=2`` to `.RequestHandler.get_secure_cookie`. * Thanks to Joost Pol of `Certified Secure `_ for reporting this issue. Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Signed cookies issued by `.RequestHandler.set_secure_cookie` in Tornado 3.2.1 cannot be read by older releases. If you need to run 3.2.1 in parallel with older releases, you can pass ``version=1`` to `.RequestHandler.set_secure_cookie` to issue cookies that are backwards-compatible (but have a known weakness, so this option should only be used for a transitional period). Other changes ~~~~~~~~~~~~~ * The C extension used to speed up the websocket module now compiles correctly on Windows with MSVC and 64-bit mode. The fallback to the pure-Python alternative now works correctly on Mac OS X machines with no C compiler installed. tornado-6.1.0/docs/releases/v3.2.2.rst000066400000000000000000000016301374705040500173060ustar00rootroot00000000000000What's new in Tornado 3.2.2 =========================== June 3, 2014 ------------ Security fixes ~~~~~~~~~~~~~~ * The XSRF token is now encoded with a random mask on each request. This makes it safe to include in compressed pages without being vulnerable to the `BREACH attack `_. This applies to most applications that use both the ``xsrf_cookies`` and ``gzip`` options (or have gzip applied by a proxy). Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * If Tornado 3.2.2 is run at the same time as older versions on the same domain, there is some potential for issues with the differing cookie versions. The `.Application` setting ``xsrf_cookie_version=1`` can be used for a transitional period to generate the older cookie format on newer servers. Other changes ~~~~~~~~~~~~~ * ``tornado.platform.asyncio`` is now compatible with ``trollius`` version 0.3. 
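Returning to the backwards-compatibility note above, the transitional cookie format is selected like any other `.Application` setting (a minimal sketch; the handler and secret are illustrative)::

    import tornado.web

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello, world")

    application = tornado.web.Application(
        [(r"/", MainHandler)],
        cookie_secret="__change_me__",
        xsrf_cookies=True,
        # Generate the pre-3.2.2 cookie format while older servers remain.
        xsrf_cookie_version=1,
    )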
tornado-6.1.0/docs/releases/v4.0.0.rst000066400000000000000000000351131374705040500173060ustar00rootroot00000000000000What's new in Tornado 4.0 ========================= July 15, 2014 ------------- Highlights ~~~~~~~~~~ * The `tornado.web.stream_request_body` decorator allows large files to be uploaded with limited memory usage. * Coroutines are now faster and are used extensively throughout Tornado itself. More methods now return `Futures <.Future>`, including most `.IOStream` methods and `.RequestHandler.flush`. * Many user-overridden methods are now allowed to return a `.Future` for flow control. * HTTP-related code is now shared between the `tornado.httpserver`, ``tornado.simple_httpclient`` and `tornado.wsgi` modules, making support for features such as chunked and gzip encoding more consistent. `.HTTPServer` now uses new delegate interfaces defined in `tornado.httputil` in addition to its old single-callback interface. * New module `tornado.tcpclient` creates TCP connections with non-blocking DNS, SSL handshaking, and support for IPv6. Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `tornado.concurrent.Future` is no longer thread-safe; use `concurrent.futures.Future` when thread-safety is needed. * Tornado now depends on the `certifi `_ package instead of bundling its own copy of the Mozilla CA list. This will be installed automatically when using ``pip`` or ``easy_install``. * This version includes the changes to the secure cookie format first introduced in version :doc:`3.2.1 `, and the xsrf token change in version :doc:`3.2.2 `. If you are upgrading from an earlier version, see those versions' release notes. * WebSocket connections from other origin sites are now rejected by default. To accept cross-origin websocket connections, override the new method `.WebSocketHandler.check_origin`. * `.WebSocketHandler` no longer supports the old ``draft 76`` protocol (this mainly affects Safari 5.x browsers). Applications should use non-websocket workarounds for these browsers. * Authors of alternative `.IOLoop` implementations should see the changes to `.IOLoop.add_handler` in this release. * The ``RequestHandler.async_callback`` and ``WebSocketHandler.async_callback`` wrapper functions have been removed; they have been obsolete for a long time due to stack contexts (and more recently coroutines). * ``curl_httpclient`` now requires a minimum of libcurl version 7.21.1 and pycurl 7.18.2. * Support for ``RequestHandler.get_error_html`` has been removed; override `.RequestHandler.write_error` instead. Other notes ~~~~~~~~~~~ * The git repository has moved to https://github.com/tornadoweb/tornado. All old links should be redirected to the new location. * An `announcement mailing list `_ is now available. * All Tornado modules are now importable on Google App Engine (although the App Engine environment does not allow the system calls used by `.IOLoop` so many modules are still unusable). `tornado.auth` ~~~~~~~~~~~~~~ * Fixed a bug in ``.FacebookMixin`` on Python 3. * When using the `.Future` interface, exceptions are more reliably delivered to the caller. `tornado.concurrent` ~~~~~~~~~~~~~~~~~~~~ * `tornado.concurrent.Future` is now always thread-unsafe (previously it would be thread-safe if the `concurrent.futures` package was available). This improves performance and provides more consistent semantics. The parts of Tornado that accept Futures will accept both Tornado's thread-unsafe Futures and the thread-safe `concurrent.futures.Future`. 
* `tornado.concurrent.Future` now includes all the functionality of the old ``TracebackFuture`` class. ``TracebackFuture`` is now simply an alias for ``Future``.

``tornado.curl_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``curl_httpclient`` now passes along the HTTP "reason" string in ``response.reason``.

`tornado.gen`
~~~~~~~~~~~~~

* Performance of coroutines has been improved.
* Coroutines no longer generate ``StackContexts`` by default, but they will be created on demand when needed.
* The internals of the `tornado.gen` module have been rewritten to improve performance when using ``Futures``, at the expense of some performance degradation for the older ``YieldPoint`` interfaces.
* New function `.with_timeout` wraps a `.Future` and raises an exception if it doesn't complete in a given amount of time.
* New object `.moment` can be yielded to allow the IOLoop to run for one iteration before resuming.
* ``Task`` is now a function returning a `.Future` instead of a ``YieldPoint`` subclass. This change should be transparent to application code, but allows ``Task`` to take advantage of the newly-optimized `.Future` handling.

`tornado.http1connection`
~~~~~~~~~~~~~~~~~~~~~~~~~

* New module contains the HTTP implementation shared by `tornado.httpserver` and ``tornado.simple_httpclient``.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* The command-line HTTP client (``python -m tornado.httpclient $URL``) now works on Python 3.
* Fixed a memory leak in `.AsyncHTTPClient` shutdown that affected applications that created many HTTP clients and IOLoops.
* New client request parameter ``decompress_response`` replaces the existing ``use_gzip`` parameter; both names are accepted.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* ``tornado.httpserver.HTTPRequest`` has moved to `tornado.httputil.HTTPServerRequest`.
* HTTP implementation has been unified with ``tornado.simple_httpclient`` in `tornado.http1connection`.
* Now supports ``Transfer-Encoding: chunked`` for request bodies.
* Now supports ``Content-Encoding: gzip`` for request bodies if ``decompress_request=True`` is passed to the `.HTTPServer` constructor.
* The ``connection`` attribute of `.HTTPServerRequest` is now documented for public use; applications are expected to write their responses via the `.HTTPConnection` interface.
* The ``HTTPServerRequest.write`` and ``HTTPServerRequest.finish`` methods are now deprecated. (`.RequestHandler.write` and `.RequestHandler.finish` are *not* deprecated; this only applies to the methods on `.HTTPServerRequest`)
* `.HTTPServer` now supports `.HTTPServerConnectionDelegate` in addition to the old ``request_callback`` interface. The delegate interface supports streaming of request bodies.
* `.HTTPServer` now detects the error of an application sending a ``Content-Length`` header that is inconsistent with the actual content.
* New constructor arguments ``max_header_size`` and ``max_body_size`` allow separate limits to be set for different parts of the request. ``max_body_size`` is applied even in streaming mode.
* New constructor argument ``chunk_size`` can be used to limit the amount of data read into memory at one time per request.
* New constructor arguments ``idle_connection_timeout`` and ``body_timeout`` allow time limits to be placed on the reading of requests.
* Form-encoded message bodies are now parsed for all HTTP methods, not just ``POST``, ``PUT``, and ``PATCH``.

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

* `.HTTPServerRequest` was moved to this module from `tornado.httpserver`.
* New base classes `.HTTPConnection`, `.HTTPServerConnectionDelegate`, and `.HTTPMessageDelegate` define the interaction between applications and the HTTP implementation.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* `.IOLoop.add_handler` and related methods now accept file-like objects in addition to raw file descriptors. Passing the objects is recommended (when possible) to avoid a garbage-collection-related problem in unit tests.
* New method `.IOLoop.clear_instance` makes it possible to uninstall the singleton instance.
* Timeout scheduling is now more robust against slow callbacks.
* `.IOLoop.add_timeout` is now a bit more efficient.
* When a function run by the `.IOLoop` returns a `.Future` and that `.Future` has an exception, the `.IOLoop` will log the exception.
* New method `.IOLoop.spawn_callback` simplifies the process of launching a fire-and-forget callback that is separated from the caller's stack context.
* New methods `.IOLoop.call_later` and `.IOLoop.call_at` simplify the specification of relative or absolute timeouts (as opposed to `~.IOLoop.add_timeout`, which used the type of its argument).

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

* The ``callback`` argument to most `.IOStream` methods is now optional. When called without a callback the method will return a `.Future` for use with coroutines.
* New method `.IOStream.start_tls` converts an `.IOStream` to an `.SSLIOStream`.
* No longer gets confused when an ``IOError`` or ``OSError`` without an ``errno`` attribute is raised.
* `.BaseIOStream.read_bytes` now accepts a ``partial`` keyword argument, which can be used to return before the full amount has been read. This is a more coroutine-friendly alternative to ``streaming_callback``.
* `.BaseIOStream.read_until` and ``read_until_regex`` now accept a ``max_bytes`` keyword argument which will cause the request to fail if it cannot be satisfied from the given number of bytes.
* `.IOStream` no longer reads from the socket into memory if it does not need data to satisfy a pending read. As a side effect, the close callback will not be run immediately if the other side closes the connection while there is unconsumed data in the buffer.
* The default ``chunk_size`` has been increased to 64KB (from 4KB).
* The `.IOStream` constructor takes a new keyword argument ``max_write_buffer_size`` (defaults to unlimited). Calls to `.BaseIOStream.write` will raise `.StreamBufferFullError` if the amount of unsent buffered data exceeds this limit.
* ``ETIMEDOUT`` errors are no longer logged. If you need to distinguish timeouts from other forms of closed connections, examine ``stream.error`` from a close callback.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

* When `.bind_sockets` chooses a port automatically, it will now use the same port for IPv4 and IPv6.
* TLS compression is now disabled by default on Python 3.3 and higher (it is not possible to change this option in older versions).

`tornado.options`
~~~~~~~~~~~~~~~~~

* It is now possible to disable the default logging configuration by setting ``options.logging`` to ``None`` instead of the string ``"none"``.

`tornado.platform.asyncio`
~~~~~~~~~~~~~~~~~~~~~~~~~~

* Now works on Python 2.6.
* Now works with Trollius version 0.3.

`tornado.platform.twisted`
~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``TwistedIOLoop`` now works on Python 3.3+ (with Twisted 14.0.0+).

``tornado.simple_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``simple_httpclient`` has better support for IPv6, which is now enabled by default.
* Improved default cipher suite selection (Python 2.7+).
* HTTP implementation has been unified with ``tornado.httpserver`` in `tornado.http1connection` * Streaming request bodies are now supported via the ``body_producer`` keyword argument to `tornado.httpclient.HTTPRequest`. * The ``expect_100_continue`` keyword argument to `tornado.httpclient.HTTPRequest` allows the use of the HTTP ``Expect: 100-continue`` feature. * ``simple_httpclient`` now raises the original exception (e.g. an `IOError`) in more cases, instead of converting everything to ``HTTPError``. ``tornado.stack_context`` ~~~~~~~~~~~~~~~~~~~~~~~~~ * The stack context system now has less performance overhead when no stack contexts are active. `tornado.tcpclient` ~~~~~~~~~~~~~~~~~~~ * New module which creates TCP connections and IOStreams, including name resolution, connecting, and SSL handshakes. `tornado.testing` ~~~~~~~~~~~~~~~~~ * `.AsyncTestCase` now attempts to detect test methods that are generators but were not run with ``@gen_test`` or any similar decorator (this would previously result in the test silently being skipped). * Better stack traces are now displayed when a test times out. * The ``@gen_test`` decorator now passes along ``*args, **kwargs`` so it can be used on functions with arguments. * Fixed the test suite when ``unittest2`` is installed on Python 3. `tornado.web` ~~~~~~~~~~~~~ * It is now possible to support streaming request bodies with the `.stream_request_body` decorator and the new `.RequestHandler.data_received` method. * `.RequestHandler.flush` now returns a `.Future` if no callback is given. * New exception `.Finish` may be raised to finish a request without triggering error handling. * When gzip support is enabled, all ``text/*`` mime types will be compressed, not just those on a whitelist. * `.Application` now implements the `.HTTPMessageDelegate` interface. * ``HEAD`` requests in `.StaticFileHandler` no longer read the entire file. * `.StaticFileHandler` now streams response bodies to the client. * New setting ``compress_response`` replaces the existing ``gzip`` setting; both names are accepted. * XSRF cookies that were not generated by this module (i.e. strings without any particular formatting) are once again accepted (as long as the cookie and body/header match). This pattern was common for testing and non-browser clients but was broken by the changes in Tornado 3.2.2. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * WebSocket connections from other origin sites are now rejected by default. Browsers do not use the same-origin policy for WebSocket connections as they do for most other browser-initiated communications. This can be surprising and a security risk, so we disallow these connections on the server side by default. To accept cross-origin websocket connections, override the new method `.WebSocketHandler.check_origin`. * `.WebSocketHandler.close` and `.WebSocketClientConnection.close` now support ``code`` and ``reason`` arguments to send a status code and message to the other side of the connection when closing. Both classes also have ``close_code`` and ``close_reason`` attributes to receive these values when the other side closes. * The C speedup module now builds correctly with MSVC, and can support messages larger than 2GB on 64-bit systems. * The fallback mechanism for detecting a missing C compiler now works correctly on Mac OS X. * Arguments to `.WebSocketHandler.open` are now decoded in the same way as arguments to `.RequestHandler.get` and similar methods. 
* It is now allowed to override ``prepare`` in a `.WebSocketHandler`, and this method may generate HTTP responses (error pages) in the usual way. The HTTP response methods are still not allowed once the WebSocket handshake has completed.

`tornado.wsgi`
~~~~~~~~~~~~~~

* New class ``WSGIAdapter`` supports running a Tornado `.Application` on a WSGI server in a way that is more compatible with Tornado's non-WSGI `.HTTPServer`. ``WSGIApplication`` is deprecated in favor of using ``WSGIAdapter`` with a regular `.Application`.
* ``WSGIAdapter`` now supports gzipped output.

tornado-6.1.0/docs/releases/v4.0.1.rst000066400000000000000000000015351374705040500173100ustar00rootroot00000000000000What's new in Tornado 4.0.1
===========================

Aug 12, 2014
------------

* The build will now fall back to pure-Python mode if the C extension fails to build for any reason (previously it would fall back for some errors but not others).
* `.IOLoop.call_at` and `.IOLoop.call_later` now always return a timeout handle for use with `.IOLoop.remove_timeout`.
* If any callback of a `.PeriodicCallback` or `.IOStream` returns a `.Future`, any error raised in that future will now be logged (similar to the behavior of `.IOLoop.add_callback`).
* Fixed an exception in client-side websocket connections when the connection is closed.
* ``simple_httpclient`` once again correctly handles 204 status codes with no content-length header.
* Fixed a regression in ``simple_httpclient`` that would result in timeouts for certain kinds of errors.

tornado-6.1.0/docs/releases/v4.0.2.rst000066400000000000000000000014701374705040500173070ustar00rootroot00000000000000What's new in Tornado 4.0.2
===========================

Sept 10, 2014
-------------

Bug fixes
~~~~~~~~~

* Fixed a bug that could sometimes cause a timeout to fire after being cancelled.
* `.AsyncTestCase` once again passes along arguments to test methods, making it compatible with extensions such as Nose's test generators.
* `.StaticFileHandler` can again compress its responses when gzip is enabled.
* ``simple_httpclient`` passes its ``max_buffer_size`` argument to the underlying stream.
* Fixed a reference cycle that can lead to increased memory consumption.
* `.add_accept_handler` will now limit the number of times it will call `~socket.socket.accept` per `.IOLoop` iteration, addressing a potential starvation issue.
* Improved error handling in `.IOStream.connect` (primarily for FreeBSD systems).

tornado-6.1.0/docs/releases/v4.1.0.rst000066400000000000000000000172531374705040500173140ustar00rootroot00000000000000What's new in Tornado 4.1
=========================

Feb 7, 2015
-----------

Highlights
~~~~~~~~~~

* If a `.Future` contains an exception but that exception is never examined or re-raised (e.g. by yielding the `.Future`), a stack trace will be logged when the `.Future` is garbage-collected.
* New class `tornado.gen.WaitIterator` provides a way to iterate over ``Futures`` in the order they resolve.
* The `tornado.websocket` module now supports compression via the "permessage-deflate" extension. Override `.WebSocketHandler.get_compression_options` to enable on the server side, and use the ``compression_options`` keyword argument to `.websocket_connect` on the client side.
* When the appropriate packages are installed, it is possible to yield `asyncio.Future` or Twisted ``Deferred`` objects in Tornado coroutines.

Backwards-compatibility notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* `.HTTPServer` now calls ``start_request`` with the correct arguments.
This change is backwards-incompatible, affecting any application which implemented `.HTTPServerConnectionDelegate` by following the example of `.Application` instead of the documented method signatures.

`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

* If a `.Future` contains an exception but that exception is never examined or re-raised (e.g. by yielding the `.Future`), a stack trace will be logged when the `.Future` is garbage-collected.
* `.Future` now catches and logs exceptions in its callbacks.

``tornado.curl_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``tornado.curl_httpclient`` now supports request bodies for ``PATCH`` and custom methods.
* ``tornado.curl_httpclient`` now supports resubmitting bodies after following redirects for methods other than ``POST``.
* ``curl_httpclient`` now runs the streaming and header callbacks on the IOLoop.
* ``tornado.curl_httpclient`` now uses its own logger for debug output so it can be filtered more easily.

`tornado.gen`
~~~~~~~~~~~~~

* New class `tornado.gen.WaitIterator` provides a way to iterate over ``Futures`` in the order they resolve.
* When the `~functools.singledispatch` library is available (standard on Python 3.4, available via ``pip install singledispatch`` on older versions), the `.convert_yielded` function can be used to make other kinds of objects yieldable in coroutines.
* New function `tornado.gen.sleep` is a coroutine-friendly analogue to `time.sleep`.
* ``gen.engine`` now correctly captures the stack context for its callbacks.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* `tornado.httpclient.HTTPRequest` accepts a new argument ``raise_error=False`` to suppress the default behavior of raising an error for non-200 response codes.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* `.HTTPServer` now calls ``start_request`` with the correct arguments. This change is backwards-incompatible, affecting any application which implemented `.HTTPServerConnectionDelegate` by following the example of `.Application` instead of the documented method signatures.
* `.HTTPServer` now tolerates extra newlines which are sometimes inserted between requests on keep-alive connections.
* `.HTTPServer` can now use keep-alive connections after a request with a chunked body.
* `.HTTPServer` now always reports ``HTTP/1.1`` instead of echoing the request version.

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

* New function `tornado.httputil.split_host_and_port` for parsing the ``netloc`` portion of URLs.
* The ``context`` argument to `.HTTPServerRequest` is now optional, and if a context is supplied the ``remote_ip`` attribute is also optional.
* `.HTTPServerRequest.body` is now always a byte string (previously the default empty body would be a unicode string on python 3).
* Header parsing now works correctly when newline-like unicode characters are present.
* Header parsing again supports both CRLF and bare LF line separators.
* Malformed ``multipart/form-data`` bodies will always be logged quietly instead of raising an unhandled exception; previously the behavior was inconsistent depending on the exact error.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* The ``kqueue`` and ``select`` IOLoop implementations now report writeability correctly, fixing flow control in IOStream.
* When a new `.IOLoop` is created, it automatically becomes "current" for the thread if there is not already a current instance.
* New method `.PeriodicCallback.is_running` can be used to see whether the `.PeriodicCallback` has been started.
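  A short sketch (the callback and interval are illustrative)::

      from tornado.ioloop import IOLoop, PeriodicCallback

      def report():
          print("tick")

      pc = PeriodicCallback(report, 1000)  # callback_time is in milliseconds
      pc.start()
      print(pc.is_running())  # True once started
      IOLoop.current().start()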
`tornado.iostream` ~~~~~~~~~~~~~~~~~~ * `.IOStream.start_tls` now uses the ``server_hostname`` parameter for certificate validation. * `.SSLIOStream` will no longer consume 100% CPU after certain error conditions. * `.SSLIOStream` no longer logs ``EBADF`` errors during the handshake as they can result from nmap scans in certain modes. `tornado.options` ~~~~~~~~~~~~~~~~~ * `~tornado.options.parse_config_file` now always decodes the config file as utf8 on Python 3. * `tornado.options.define` more accurately finds the module defining the option. ``tornado.platform.asyncio`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * It is now possible to yield ``asyncio.Future`` objects in coroutines when the `~functools.singledispatch` library is available and ``tornado.platform.asyncio`` has been imported. * New methods `tornado.platform.asyncio.to_tornado_future` and `~tornado.platform.asyncio.to_asyncio_future` convert between the two libraries' `.Future` classes. ``tornado.platform.twisted`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * It is now possible to yield ``Deferred`` objects in coroutines when the `~functools.singledispatch` library is available and ``tornado.platform.twisted`` has been imported. `tornado.tcpclient` ~~~~~~~~~~~~~~~~~~~ * `.TCPClient` will no longer raise an exception due to an ill-timed timeout. `tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ * `.TCPServer` no longer ignores its ``read_chunk_size`` argument. `tornado.testing` ~~~~~~~~~~~~~~~~~ * `.AsyncTestCase` has better support for multiple exceptions. Previously it would silently swallow all but the last; now it raises the first and logs all the rest. * `.AsyncTestCase` now cleans up `.Subprocess` state on ``tearDown`` when necessary. `tornado.web` ~~~~~~~~~~~~~ * The ``asynchronous`` decorator now understands `concurrent.futures.Future` in addition to `tornado.concurrent.Future`. * `.StaticFileHandler` no longer logs a stack trace if the connection is closed while sending the file. * `.RequestHandler.send_error` now supports a ``reason`` keyword argument, similar to `tornado.web.HTTPError`. * `.RequestHandler.locale` now has a property setter. * `.Application.add_handlers` hostname matching now works correctly with IPv6 literals. * Redirects for the `.Application` ``default_host`` setting now match the request protocol instead of redirecting HTTPS to HTTP. * Malformed ``_xsrf`` cookies are now ignored instead of causing uncaught exceptions. * ``Application.start_request`` now has the same signature as `.HTTPServerConnectionDelegate.start_request`. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * The `tornado.websocket` module now supports compression via the "permessage-deflate" extension. Override `.WebSocketHandler.get_compression_options` to enable on the server side, and use the ``compression_options`` keyword argument to `.websocket_connect` on the client side. * `.WebSocketHandler` no longer logs stack traces when the connection is closed. * `.WebSocketHandler.open` now accepts ``*args, **kw`` for consistency with ``RequestHandler.get`` and related methods. * The ``Sec-WebSocket-Version`` header now includes all supported versions. * `.websocket_connect` now has a ``on_message_callback`` keyword argument for callback-style use without ``read_message()``. 
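  For example (a minimal sketch; the URL and message handling are
  illustrative)::

      from tornado import gen
      from tornado.ioloop import IOLoop
      from tornado.websocket import websocket_connect

      def on_message(message):
          # message is None when the connection is closed.
          print(message)

      @gen.coroutine
      def main():
          conn = yield websocket_connect("ws://localhost:8888/ws",
                                         on_message_callback=on_message)
          conn.write_message("hello")
          yield gen.sleep(5)  # stay alive long enough to receive replies

      IOLoop.current().run_sync(main)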
tornado-6.1.0/docs/releases/v4.2.0.rst000066400000000000000000000211401374705040500173030ustar00rootroot00000000000000What's new in Tornado 4.2 ========================= May 26, 2015 ------------ Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``SSLIOStream.connect`` and `.IOStream.start_tls` now validate certificates by default. * Certificate validation will now use the system CA root certificates instead of ``certifi`` when possible (i.e. Python 2.7.9+ or 3.4+). This includes `.IOStream` and ``simple_httpclient``, but not ``curl_httpclient``. * The default SSL configuration has become stricter, using `ssl.create_default_context` where available on the client side. (On the server side, applications are encouraged to migrate from the ``ssl_options`` dict-based API to pass an `ssl.SSLContext` instead). * The deprecated classes in the `tornado.auth` module, ``GoogleMixin``, ``FacebookMixin``, and ``FriendFeedMixin`` have been removed. New modules: `tornado.locks` and `tornado.queues` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These modules provide classes for coordinating coroutines, merged from `Toro `_. To port your code from Toro's queues to Tornado 4.2, import `.Queue`, `.PriorityQueue`, or `.LifoQueue` from `tornado.queues` instead of from ``toro``. Use `.Queue` instead of Toro's ``JoinableQueue``. In Tornado the methods `~.Queue.join` and `~.Queue.task_done` are available on all queues, not on a special ``JoinableQueue``. Tornado queues raise exceptions specific to Tornado instead of reusing exceptions from the Python standard library. Therefore instead of catching the standard `queue.Empty` exception from `.Queue.get_nowait`, catch the special `tornado.queues.QueueEmpty` exception, and instead of catching the standard `queue.Full` from `.Queue.get_nowait`, catch `tornado.queues.QueueFull`. To port from Toro's locks to Tornado 4.2, import `.Condition`, `.Event`, `.Semaphore`, `.BoundedSemaphore`, or `.Lock` from `tornado.locks` instead of from ``toro``. Toro's ``Semaphore.wait`` allowed a coroutine to wait for the semaphore to be unlocked *without* acquiring it. This encouraged unorthodox patterns; in Tornado, just use `~.Semaphore.acquire`. Toro's ``Event.wait`` raised a ``Timeout`` exception after a timeout. In Tornado, `.Event.wait` raises ``tornado.gen.TimeoutError``. Toro's ``Condition.wait`` also raised ``Timeout``, but in Tornado, the `.Future` returned by `.Condition.wait` resolves to False after a timeout:: @gen.coroutine def await_notification(): if not (yield condition.wait(timeout=timedelta(seconds=1))): print('timed out') else: print('condition is true') In lock and queue methods, wherever Toro accepted ``deadline`` as a keyword argument, Tornado names the argument ``timeout`` instead. Toro's ``AsyncResult`` is not merged into Tornado, nor its exceptions ``NotReady`` and ``AlreadySet``. Use a `.Future` instead. If you wrote code like this:: from tornado import gen import toro result = toro.AsyncResult() @gen.coroutine def setter(): result.set(1) @gen.coroutine def getter(): value = yield result.get() print(value) # Prints "1". Then the Tornado equivalent is:: from tornado import gen from tornado.concurrent import Future result = Future() @gen.coroutine def setter(): result.set_result(1) @gen.coroutine def getter(): value = yield result print(value) # Prints "1". `tornado.autoreload` ~~~~~~~~~~~~~~~~~~~~ * Improved compatibility with Windows. * Fixed a bug in Python 3 if a module was imported during a reload check. 
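To make the `tornado.queues` exceptions described earlier in these notes concrete, a minimal sketch (the values and queue size are illustrative)::

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.queues import Queue, QueueEmpty, QueueFull

    @gen.coroutine
    def main():
        q = Queue(maxsize=1)
        yield q.put("job")
        try:
            q.put_nowait("overflow")  # the queue is already full
        except QueueFull:
            print("full")
        print(q.get_nowait())         # "job"
        try:
            q.get_nowait()            # nothing left
        except QueueEmpty:
            print("empty")

    IOLoop.current().run_sync(main)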
`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

* `.run_on_executor` now accepts arguments to control which attributes it uses to find the `.IOLoop` and executor.

`tornado.curl_httpclient`
~~~~~~~~~~~~~~~~~~~~~~~~~

* Fixed a bug that would cause the client to stop processing requests if an exception occurred in certain places while there is a queue.

`tornado.escape`
~~~~~~~~~~~~~~~~

* `.xhtml_escape` now supports numeric character references in hex format (``&#x20;``).

`tornado.gen`
~~~~~~~~~~~~~

* `.WaitIterator` no longer uses weak references, which fixes several garbage-collection-related bugs.
* ``tornado.gen.Multi`` and `tornado.gen.multi_future` (which are used when yielding a list or dict in a coroutine) now log any exceptions after the first if more than one `.Future` fails (previously they would be logged when the `.Future` was garbage-collected, but this is more reliable). Both have a new keyword argument ``quiet_exceptions`` to suppress logging of certain exception types; to use this argument you must call ``Multi`` or ``multi_future`` directly instead of simply yielding a list.
* `.multi_future` now works when given multiple copies of the same `.Future`.
* On Python 3, catching an exception in a coroutine no longer leads to leaks via ``Exception.__context__``.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* The ``raise_error`` argument now works correctly with the synchronous `.HTTPClient`.
* The synchronous `.HTTPClient` no longer interferes with `.IOLoop.current()`.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* `.HTTPServer` is now a subclass of `tornado.util.Configurable`.

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

* `.HTTPHeaders` can now be copied with `copy.copy` and `copy.deepcopy`.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* The `.IOLoop` constructor now has a ``make_current`` keyword argument to control whether the new `.IOLoop` becomes `.IOLoop.current()`.
* Third-party implementations of `.IOLoop` should accept ``**kwargs`` in their ``IOLoop.initialize`` methods and pass them to the superclass implementation.
* `.PeriodicCallback` is now more efficient when the clock jumps forward by a large amount.

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

* ``SSLIOStream.connect`` and `.IOStream.start_tls` now validate certificates by default.
* New method `.SSLIOStream.wait_for_handshake` allows server-side applications to wait for the handshake to complete in order to verify client certificates or use NPN/ALPN.
* The `.Future` returned by ``SSLIOStream.connect`` now resolves after the handshake is complete instead of as soon as the TCP connection is established.
* Reduced logging of SSL errors.
* `.BaseIOStream.read_until_close` now works correctly when a ``streaming_callback`` is given but ``callback`` is None (i.e. when it returns a `.Future`).

`tornado.locale`
~~~~~~~~~~~~~~~~

* New method `.GettextLocale.pgettext` allows additional context to be supplied for gettext translations.

`tornado.log`
~~~~~~~~~~~~~

* `.define_logging_options` now works correctly when given a non-default ``options`` object.

`tornado.process`
~~~~~~~~~~~~~~~~~

* New method `.Subprocess.wait_for_exit` is a coroutine-friendly version of `.Subprocess.set_exit_callback`.

`tornado.simple_httpclient`
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Improved performance on Python 3 by reusing a single `ssl.SSLContext`.
* New constructor argument ``max_body_size`` controls the maximum response size the client is willing to accept. It may be bigger than ``max_buffer_size`` if ``streaming_callback`` is used.
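  For example, to let the simple client accept responses up to 1GB (a
  minimal sketch; the size is illustrative)::

      from tornado.httpclient import AsyncHTTPClient

      AsyncHTTPClient.configure(
          "tornado.simple_httpclient.SimpleAsyncHTTPClient",
          max_body_size=1024 * 1024 * 1024,
      )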
`tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ * `.TCPServer.handle_stream` may be a coroutine (so that any exceptions it raises will be logged). `tornado.util` ~~~~~~~~~~~~~~ * `.import_object` now supports unicode strings on Python 2. * `.Configurable.initialize` now supports positional arguments. `tornado.web` ~~~~~~~~~~~~~ * Key versioning support for cookie signing. ``cookie_secret`` application setting can now contain a dict of valid keys with version as key. The current signing key then must be specified via ``key_version`` setting. * Parsing of the ``If-None-Match`` header now follows the RFC and supports weak validators. * Passing ``secure=False`` or ``httponly=False`` to `.RequestHandler.set_cookie` now works as expected (previously only the presence of the argument was considered and its value was ignored). * `.RequestHandler.get_arguments` now requires that its ``strip`` argument be of type bool. This helps prevent errors caused by the slightly dissimilar interfaces between the singular and plural methods. * Errors raised in ``_handle_request_exception`` are now logged more reliably. * `.RequestHandler.redirect` now works correctly when called from a handler whose path begins with two slashes. * Passing messages containing ``%`` characters to `tornado.web.HTTPError` no longer causes broken error messages. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * The ``on_close`` method will no longer be called more than once. * When the other side closes a connection, we now echo the received close code back instead of sending an empty close frame. tornado-6.1.0/docs/releases/v4.2.1.rst000066400000000000000000000005051374705040500173060ustar00rootroot00000000000000What's new in Tornado 4.2.1 =========================== Jul 17, 2015 ------------ Security fix ~~~~~~~~~~~~ * This release fixes a path traversal vulnerability in `.StaticFileHandler`, in which files whose names *started with* the ``static_path`` directory but were not actually *in* that directory could be accessed. tornado-6.1.0/docs/releases/v4.3.0.rst000066400000000000000000000155111374705040500173110ustar00rootroot00000000000000What's new in Tornado 4.3 ========================= Nov 6, 2015 ----------- Highlights ~~~~~~~~~~ * The new async/await keywords in Python 3.5 are supported. In most cases, ``async def`` can be used in place of the ``@gen.coroutine`` decorator. Inside a function defined with ``async def``, use ``await`` instead of ``yield`` to wait on an asynchronous operation. Coroutines defined with async/await will be faster than those defined with ``@gen.coroutine`` and ``yield``, but do not support some features including ``Callback``/``Wait`` or the ability to yield a Twisted ``Deferred``. See :ref:`the users' guide ` for more. * The async/await keywords are also available when compiling with Cython in older versions of Python. Deprecation notice ~~~~~~~~~~~~~~~~~~ * This will be the last release of Tornado to support Python 2.6 or 3.2. Note that PyPy3 will continue to be supported even though it implements a mix of Python 3.2 and 3.3 features. Installation ~~~~~~~~~~~~ * Tornado has several new dependencies: ``ordereddict`` on Python 2.6, ``singledispatch`` on all Python versions prior to 3.4 (This was an optional dependency in prior versions of Tornado, and is now mandatory), and ``backports_abc>=0.4`` on all versions prior to 3.5. These dependencies will be installed automatically when installing with ``pip`` or ``setup.py install``. These dependencies will not be required when running on Google App Engine. 
* Binary wheels are provided for Python 3.5 on Windows (32 and 64 bit). `tornado.auth` ~~~~~~~~~~~~~~ * New method `.OAuth2Mixin.oauth2_request` can be used to make authenticated requests with an access token. * Now compatible with callbacks that have been compiled with Cython. `tornado.autoreload` ~~~~~~~~~~~~~~~~~~~~ * Fixed an issue with the autoreload command-line wrapper in which imports would be incorrectly interpreted as relative. `tornado.curl_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~ * Fixed parsing of multi-line headers. * ``allow_nonstandard_methods=True`` now bypasses body sanity checks, in the same way as in ``simple_httpclient``. * The ``PATCH`` method now allows a body without ``allow_nonstandard_methods=True``. `tornado.gen` ~~~~~~~~~~~~~ * `.WaitIterator` now supports the ``async for`` statement on Python 3.5. * ``@gen.coroutine`` can be applied to functions compiled with Cython. On python versions prior to 3.5, the ``backports_abc`` package must be installed for this functionality. * ``Multi`` and `.multi_future` are deprecated and replaced by a unified function `.multi`. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ * `tornado.httpclient.HTTPError` is now copyable with the `copy` module. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ * Requests containing both ``Content-Length`` and ``Transfer-Encoding`` will be treated as an error. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ * `.HTTPHeaders` can now be pickled and unpickled. `tornado.ioloop` ~~~~~~~~~~~~~~~~ * ``IOLoop(make_current=True)`` now works as intended instead of raising an exception. * The Twisted and asyncio IOLoop implementations now clear ``current()`` when they exit, like the standard IOLoops. * `.IOLoop.add_callback` is faster in the single-threaded case. * `.IOLoop.add_callback` no longer raises an error when called on a closed IOLoop, but the callback will not be invoked. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ * Coroutine-style usage of `.IOStream` now converts most errors into `.StreamClosedError`, which has the effect of reducing log noise from exceptions that are outside the application's control (especially SSL errors). * `.StreamClosedError` now has a ``real_error`` attribute which indicates why the stream was closed. It is the same as the ``error`` attribute of `.IOStream` but may be more easily accessible than the `.IOStream` itself. * Improved error handling in `~.BaseIOStream.read_until_close`. * Logging is less noisy when an SSL server is port scanned. * ``EINTR`` is now handled on all reads. `tornado.locale` ~~~~~~~~~~~~~~~~ * `tornado.locale.load_translations` now accepts encodings other than UTF-8. UTF-16 and UTF-8 will be detected automatically if a BOM is present; for other encodings `.load_translations` has an ``encoding`` parameter. `tornado.locks` ~~~~~~~~~~~~~~~ * `.Lock` and `.Semaphore` now support the ``async with`` statement on Python 3.5. `tornado.log` ~~~~~~~~~~~~~ * A new time-based log rotation mode is available with ``--log_rotate_mode=time``, ``--log-rotate-when``, and ``log-rotate-interval``. `tornado.netutil` ~~~~~~~~~~~~~~~~~ * `.bind_sockets` now supports ``SO_REUSEPORT`` with the ``reuse_port=True`` argument. `tornado.options` ~~~~~~~~~~~~~~~~~ * Dashes and underscores are now fully interchangeable in option names. `tornado.queues` ~~~~~~~~~~~~~~~~ * `.Queue` now supports the ``async for`` statement on Python 3.5. 
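As a sketch of the ``async for`` support added to `.Queue` on Python 3.5,
using a producer/consumer pattern similar to the one in the
`tornado.queues` documentation::

    from tornado import ioloop, queues

    q = queues.Queue()

    async def consumer():
        # New in 4.3: a Queue can be iterated with ``async for``.
        async for item in q:
            try:
                print("processing", item)
            finally:
                q.task_done()

    async def producer():
        for i in range(5):
            await q.put(i)

    async def main():
        ioloop.IOLoop.current().spawn_callback(consumer)
        await producer()
        await q.join()  # Wait until every item has been processed.

    ioloop.IOLoop.current().run_sync(main)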
`tornado.simple_httpclient`
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* When following redirects, ``streaming_callback`` and ``header_callback``
  will no longer be run on the redirect responses (only the final
  non-redirect).

* Responses containing both ``Content-Length`` and ``Transfer-Encoding``
  will be treated as an error.

`tornado.template`
~~~~~~~~~~~~~~~~~~

* `tornado.template.ParseError` now includes the filename in addition to
  the line number.

* Whitespace handling has become more configurable. The `.Loader`
  constructor now has a ``whitespace`` argument, there is a new
  ``template_whitespace`` `.Application` setting, and there is a new
  ``{% whitespace %}`` template directive. All of these options take a
  mode name defined in the `tornado.template.filter_whitespace` function.
  The default mode is ``single``, which is the same behavior as prior
  versions of Tornado.

* Non-ASCII filenames are now supported.

`tornado.testing`
~~~~~~~~~~~~~~~~~

* `.ExpectLog` objects now have a boolean ``logged_stack`` attribute to
  make it easier to test whether an exception stack trace was logged.

`tornado.web`
~~~~~~~~~~~~~

* The hard limit of 4000 bytes per outgoing header has been removed.

* `.StaticFileHandler` returns the correct ``Content-Type`` for files with
  ``.gz``, ``.bz2``, and ``.xz`` extensions.

* Responses smaller than 1000 bytes will no longer be compressed.

* The default gzip compression level is now 6 (was 9).

* Fixed a regression in Tornado 4.2.1 that broke `.StaticFileHandler`
  with a ``path`` of ``/``.

* `tornado.web.HTTPError` is now copyable with the `copy` module.

* The exception `.Finish` now accepts an argument which will be passed to
  the method `.RequestHandler.finish`.

* New `.Application` setting ``xsrf_cookie_kwargs`` can be used to set
  additional attributes such as ``secure`` or ``httponly`` on the XSRF
  cookie.

* `.Application.listen` now returns the `.HTTPServer` it created.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

* Fixed handling of continuation frames when compression is enabled.

tornado-6.1.0/docs/releases/v4.4.0.rst

What's new in Tornado 4.4
=========================

Jul 15, 2016
------------

General
~~~~~~~

* Tornado now requires Python 2.7 or 3.3+; versions 2.6 and 3.2 are no
  longer supported. Pypy3 is still supported even though its latest
  release is mainly based on Python 3.2.

* The `monotonic <https://pypi.python.org/pypi/monotonic>`_ package is now
  supported as an alternative to
  `Monotime <https://pypi.python.org/pypi/Monotime>`_ for monotonic clock
  support on Python 2.

``tornado.curl_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Failures in ``_curl_setup_request`` no longer cause the ``max_clients``
  pool to be exhausted.

* Non-ASCII header values are now handled correctly.

`tornado.gen`
~~~~~~~~~~~~~

* `.with_timeout` now accepts any yieldable object (except ``YieldPoint``),
  not just `tornado.concurrent.Future`.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* The errors raised by timeouts now indicate what state the request was
  in; the error message is no longer simply "599 Timeout".

* Calling `repr` on a `tornado.httpclient.HTTPError` no longer raises an
  error.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* Int-like enums (including `http.HTTPStatus`) can now be used as status
  codes.

* Responses with status code ``204 No Content`` no longer emit a
  ``Content-Length: 0`` header.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* Improved performance when there are large numbers of active timeouts.
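As a sketch of the `.with_timeout` change above, a list of ``Futures`` (one
kind of yieldable object) can now be wrapped directly; the URLs and the
one-second deadline are illustrative::

    import datetime

    from tornado import gen, httpclient, ioloop

    @gen.coroutine
    def fetch_both():
        client = httpclient.AsyncHTTPClient()
        # Before 4.4 this required converting the list to a single
        # Future first (e.g. with gen.multi).
        responses = yield gen.with_timeout(
            datetime.timedelta(seconds=1),
            [client.fetch("http://example.com/a"),
             client.fetch("http://example.com/b")])
        raise gen.Return(responses)

    ioloop.IOLoop.current().run_sync(fetch_both)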
`tornado.netutil`
~~~~~~~~~~~~~~~~~

* All included `.Resolver` implementations raise `IOError` (or a subclass)
  for any resolution failure.

`tornado.options`
~~~~~~~~~~~~~~~~~

* Options can now be modified with subscript syntax in addition to
  attribute syntax.

* The special variable ``__file__`` is now available inside config files.

``tornado.simple_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* HTTP/1.0 (not 1.1) responses without a ``Content-Length`` header now
  work correctly.

`tornado.tcpserver`
~~~~~~~~~~~~~~~~~~~

* `.TCPServer.bind` now accepts a ``reuse_port`` argument.

`tornado.testing`
~~~~~~~~~~~~~~~~~

* Test sockets now always use ``127.0.0.1`` instead of ``localhost``. This
  avoids conflicts when the automatically-assigned port is available on
  IPv4 but not IPv6, or in unusual network configurations when
  ``localhost`` has multiple IP addresses.

`tornado.web`
~~~~~~~~~~~~~

* ``image/svg+xml`` is now on the list of compressible mime types.

* Fixed an error on Python 3 when compression is used with multiple
  ``Vary`` headers.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

* ``WebSocketHandler.__init__`` now uses `super`, which improves support
  for multiple inheritance.

tornado-6.1.0/docs/releases/v4.4.1.rst

What's new in Tornado 4.4.1
===========================

Jul 23, 2016
------------

`tornado.web`
~~~~~~~~~~~~~

* Fixed a regression in Tornado 4.4 which caused URL regexes containing
  backslash escapes outside capturing groups to be rejected.

tornado-6.1.0/docs/releases/v4.4.2.rst

What's new in Tornado 4.4.2
===========================

Oct 1, 2016
-----------

Security fixes
~~~~~~~~~~~~~~

* A difference in cookie parsing between Tornado and web browsers
  (especially when combined with Google Analytics) could allow an attacker
  to set arbitrary cookies and bypass XSRF protection. The cookie parser
  has been rewritten to fix this attack.

Backwards-compatibility notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Cookies containing certain special characters (in particular semicolon
  and square brackets) are now parsed differently.

* If the cookie header contains a combination of valid and invalid
  cookies, the valid ones will be returned (older versions of Tornado
  would reject the entire header for a single invalid cookie).

tornado-6.1.0/docs/releases/v4.4.3.rst

What's new in Tornado 4.4.3
===========================

Mar 30, 2017
------------

Bug fixes
~~~~~~~~~

* The `tornado.auth` module has been updated for compatibility with a
  change to Facebook's access_token endpoint.

tornado-6.1.0/docs/releases/v4.5.0.rst

What's new in Tornado 4.5
=========================

Apr 16, 2017
------------

Backwards-compatibility warning
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- The `tornado.websocket` module now imposes a limit on the size of
  incoming messages, which defaults to 10MiB.

New module
~~~~~~~~~~

- `tornado.routing` provides a more flexible routing system than the one
  built in to `.Application`.

General changes
~~~~~~~~~~~~~~~

- Reduced the number of circular references, reducing memory usage and
  improving performance.

`tornado.auth`
~~~~~~~~~~~~~~

* The `tornado.auth` module has been updated for compatibility with a
  change to Facebook's access_token endpoint.
  This includes both the changes initially released in Tornado 4.4.3 and
  an additional change to support the ``session_expires`` field in the new
  format. The ``session_expires`` field is currently a string; it should
  be accessed as ``int(user['session_expires'])`` because it will change
  from a string to an int in Tornado 5.0.

`tornado.autoreload`
~~~~~~~~~~~~~~~~~~~~

- Autoreload is now compatible with the `asyncio` event loop.

- Autoreload no longer attempts to close the `.IOLoop` and all registered
  file descriptors before restarting; it relies on the ``CLOEXEC`` flag
  being set instead.

`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

- Suppressed some "'NoneType' object not callable" messages that could be
  logged at shutdown.

`tornado.gen`
~~~~~~~~~~~~~

- ``yield None`` is now equivalent to ``yield gen.moment``.
  `~tornado.gen.moment` is deprecated. This improves compatibility with
  `asyncio`.

- Fixed an issue in which a generator object could be garbage collected
  prematurely (most often when weak references are used).

- New function `.is_coroutine_function` identifies functions wrapped by
  `.coroutine` or ``engine``.

``tornado.http1connection``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

- The ``Transfer-Encoding`` header is now parsed case-insensitively.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

- ``SimpleAsyncHTTPClient`` now follows 308 redirects.

- ``CurlAsyncHTTPClient`` will no longer accept protocols other than
  ``http`` and ``https``. To override this, set ``pycurl.PROTOCOLS`` and
  ``pycurl.REDIR_PROTOCOLS`` in a ``prepare_curl_callback``.

- ``CurlAsyncHTTPClient`` now supports digest authentication for proxies
  (in addition to basic auth) via the new ``proxy_auth_mode`` argument.

- The minimum supported version of ``libcurl`` is now ``7.22.0``.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

- `.HTTPServer` now accepts the keyword argument ``trusted_downstream``
  which controls the parsing of ``X-Forwarded-For`` headers. This header
  may be a list or set of IP addresses of trusted proxies which will be
  skipped in the ``X-Forwarded-For`` list.

- The ``no_keep_alive`` argument works again.

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

- `.url_concat` correctly handles fragments and existing query arguments.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

- Fixed 100% CPU usage after a callback returns an empty list or dict.

- `.IOLoop.add_callback` now uses a lockless implementation which makes it
  safe for use from ``__del__`` methods. This improves performance of
  calls to `~.IOLoop.add_callback` from the `.IOLoop` thread, and slightly
  decreases it for calls from other threads.

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

- `memoryview` objects are now permitted as arguments to
  `~.BaseIOStream.write`.

- The internal memory buffers used by `.IOStream` now use `bytearray`
  instead of a list of `bytes`, improving performance.

- Futures returned by `~.BaseIOStream.write` are no longer orphaned if a
  second call to ``write`` occurs before the previous one is finished.

`tornado.log`
~~~~~~~~~~~~~

- Colored log output is now supported on Windows if the
  `colorama <https://pypi.python.org/pypi/colorama>`_ library is installed
  and the application calls ``colorama.init()`` at startup.

- The signature of the `.LogFormatter` constructor has been changed to
  make it compatible with `logging.config.dictConfig`.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

- Worked around an issue that caused "LookupError: unknown encoding:
  latin1" errors on Solaris.

`tornado.process`
~~~~~~~~~~~~~~~~~

- `.Subprocess` no longer causes "subprocess still running" warnings on
  Python 3.6.

- Improved error handling in `.cpu_count`.
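As a sketch of the new ``trusted_downstream`` argument described above (the
handler and proxy addresses are illustrative)::

    from tornado import httpserver, ioloop, web

    class IPHandler(web.RequestHandler):
        def get(self):
            # remote_ip is derived from X-Forwarded-For, skipping the
            # trusted proxies listed below.
            self.write(self.request.remote_ip)

    app = web.Application([(r"/", IPHandler)])
    server = httpserver.HTTPServer(
        app, xheaders=True, trusted_downstream=["10.0.0.1", "10.0.0.2"])
    server.listen(8888)
    ioloop.IOLoop.current().start()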
`tornado.tcpclient`
~~~~~~~~~~~~~~~~~~~

- `.TCPClient` now supports a ``source_ip`` and ``source_port`` argument.

- Improved error handling for environments where IPv6 support is
  incomplete.

`tornado.tcpserver`
~~~~~~~~~~~~~~~~~~~

- `.TCPServer.handle_stream` implementations may now be native coroutines.

- Stopping a `.TCPServer` twice no longer raises an exception.

`tornado.web`
~~~~~~~~~~~~~

- `.RedirectHandler` now supports substituting parts of the matched URL
  into the redirect location using `str.format` syntax.

- New methods `.RequestHandler.render_linked_js`,
  `.RequestHandler.render_embed_js`, `.RequestHandler.render_linked_css`,
  and `.RequestHandler.render_embed_css` can be overridden to customize
  the output of `.UIModule`.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

- `.WebSocketHandler.on_message` implementations may now be coroutines.
  New messages will not be processed until the previous ``on_message``
  coroutine has finished.

- The ``websocket_ping_interval`` and ``websocket_ping_timeout``
  application settings can now be used to enable a periodic ping of the
  websocket connection, allowing dropped connections to be detected and
  closed.

- The new ``websocket_max_message_size`` setting defaults to 10MiB. The
  connection will be closed if messages larger than this are received.

- Headers set by `.RequestHandler.prepare` or
  `.RequestHandler.set_default_headers` are now sent as a part of the
  websocket handshake.

- Return values from `.WebSocketHandler.get_compression_options` may now
  include the keys ``compression_level`` and ``mem_level`` to set gzip
  parameters. The default compression level is now 6 instead of 9.

Demos
~~~~~

- A new file upload demo is available in the ``file_upload`` directory.

- A new `.TCPClient` and `.TCPServer` demo is available in the ``tcpecho``
  directory.

- Minor updates have been made to several existing demos, including
  updates to more recent versions of jquery.

Credits
~~~~~~~

The following people contributed commits to this release:

- A\. Jesse Jiryu Davis
- Aaron Opfer
- Akihiro Yamazaki
- Alexander
- Andreas Røsdal
- Andrew Rabert
- Andrew Sumin
- Antoine Pietri
- Antoine Pitrou
- Artur Stawiarski
- Ben Darnell
- Brian Mego
- Dario
- Doug Vargas
- Eugene Dubovoy
- Iver Jordal
- JZQT
- James Maier
- Jeff Hunter
- Leynos
- Mark Henderson
- Michael V. DePalatis
- Min RK
- Mircea Ulinic
- Ping
- Ping Yang
- Riccardo Magliocchetti
- Samuel Chen
- Samuel Dion-Girardeau
- Scott Meisburger
- Shawn Ding
- TaoBeier
- Thomas Kluyver
- Vadim Semenov
- matee
- mike820324
- stiletto
- zhimin
- 依云

tornado-6.1.0/docs/releases/v4.5.1.rst

What's new in Tornado 4.5.1
===========================

Apr 20, 2017
------------

`tornado.log`
~~~~~~~~~~~~~

- Improved detection of libraries for colorized logging.

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

- `.url_concat` once again treats None as equivalent to an empty sequence.

tornado-6.1.0/docs/releases/v4.5.2.rst

What's new in Tornado 4.5.2
===========================

Aug 27, 2017
------------

Bug Fixes
~~~~~~~~~

- Tornado now sets the ``FD_CLOEXEC`` flag on all file descriptors it
  creates. This prevents hanging client connections and resource leaks
  when the `tornado.autoreload` module (or ``Application(debug=True)``)
  is used.
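As a sketch of the ``websocket_ping_interval`` and ``websocket_ping_timeout``
application settings introduced in Tornado 4.5 (the handler and values are
illustrative)::

    from tornado import web, websocket

    class EchoHandler(websocket.WebSocketHandler):
        def on_message(self, message):
            self.write_message(message)

    app = web.Application(
        [(r"/ws", EchoHandler)],
        websocket_ping_interval=10,   # seconds between pings
        websocket_ping_timeout=30,    # close if no pong arrives in time
        websocket_max_message_size=10 * 1024 * 1024,
    )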
tornado-6.1.0/docs/releases/v4.5.3.rst000066400000000000000000000022451374705040500173160ustar00rootroot00000000000000What's new in Tornado 4.5.3 =========================== Jan 6, 2018 ------------ `tornado.curl_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~ - Improved debug logging on Python 3. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ - ``Content-Length`` and ``Transfer-Encoding`` headers are no longer sent with 1xx or 204 responses (this was already true of 304 responses). - Reading chunked requests no longer leaves the connection in a broken state. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ - Writing a `memoryview` can no longer result in "BufferError: Existing exports of data: object cannot be re-sized". `tornado.options` ~~~~~~~~~~~~~~~~~ - Duplicate option names are now detected properly whether they use hyphens or underscores. `tornado.testing` ~~~~~~~~~~~~~~~~~ - `.AsyncHTTPTestCase.fetch` now uses ``127.0.0.1`` instead of ``localhost``, improving compatibility with systems that have partially-working ipv6 stacks. `tornado.web` ~~~~~~~~~~~~~ - It is no longer allowed to send a body with 1xx or 204 responses. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ - Requests with invalid websocket headers now get a response with status code 400 instead of a closed connection. tornado-6.1.0/docs/releases/v5.0.0.rst000066400000000000000000000314721374705040500173130ustar00rootroot00000000000000What's new in Tornado 5.0 ========================= Mar 5, 2018 ----------- Highlights ~~~~~~~~~~ - The focus of this release is improving integration with `asyncio`. On Python 3, the `.IOLoop` is always a wrapper around the `asyncio` event loop, and `asyncio.Future` and `asyncio.Task` are used instead of their Tornado counterparts. This means that libraries based on `asyncio` can be mixed relatively seamlessly with those using Tornado. While care has been taken to minimize the disruption from this change, code changes may be required for compatibility with Tornado 5.0, as detailed in the following section. - Tornado 5.0 supports Python 2.7.9+ and 3.4+. Python 2.7 and 3.4 are deprecated and support for them will be removed in Tornado 6.0, which will require Python 3.5+. Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Python 3.3 is no longer supported. - Versions of Python 2.7 that predate the `ssl` module update are no longer supported. (The `ssl` module was updated in version 2.7.9, although in some distributions the updates are present in builds with a lower version number. Tornado requires `ssl.SSLContext`, `ssl.create_default_context`, and `ssl.match_hostname`) - Versions of Python 3.5 prior to 3.5.2 are no longer supported due to a change in the async iterator protocol in that version. - The ``trollius`` project (`asyncio` backported to Python 2) is no longer supported. - `tornado.concurrent.Future` is now an alias for `asyncio.Future` when running on Python 3. This results in a number of minor behavioral changes: - `.Future` objects can only be created while there is a current `.IOLoop` - The timing of callbacks scheduled with ``Future.add_done_callback`` has changed. `tornado.concurrent.future_add_done_callback` can be used to make the behavior more like older versions of Tornado (but not identical). Some of these changes are also present in the Python 2 version of `tornado.concurrent.Future` to minimize the difference between Python 2 and 3. - Cancellation is now partially supported, via `asyncio.Future.cancel`. A canceled `.Future` can no longer have its result set. 
  Applications that handle `~asyncio.Future` objects directly may want to
  use `tornado.concurrent.future_set_result_unless_cancelled`. In native
  coroutines, cancellation will cause an exception to be raised in the
  coroutine.

  - The ``exc_info`` and ``set_exc_info`` methods are no longer present.
    Use `tornado.concurrent.future_set_exc_info` to replace the latter,
    and raise the exception with `~asyncio.Future.result` to replace the
    former.

- ``io_loop`` arguments to many Tornado functions have been removed. Use
  `.IOLoop.current()` instead of passing `.IOLoop` objects explicitly.

- On Python 3, `.IOLoop` is always a wrapper around the `asyncio` event
  loop. ``IOLoop.configure`` is effectively removed on Python 3 (for
  compatibility, it may be called to redundantly specify the
  `asyncio`-backed `.IOLoop`).

- `.IOLoop.instance` is now a deprecated alias for `.IOLoop.current`.
  Applications that need the cross-thread communication behavior
  facilitated by `.IOLoop.instance` should use their own global variable
  instead.

Other notes
~~~~~~~~~~~

- The ``futures`` (`concurrent.futures` backport) package is now required
  on Python 2.7.

- The ``certifi`` and ``backports.ssl-match-hostname`` packages are no
  longer required on Python 2.7.

- Python 3.6 or higher is recommended, because it features more efficient
  garbage collection of `asyncio.Future` objects.

`tornado.auth`
~~~~~~~~~~~~~~

- `.GoogleOAuth2Mixin` now uses a newer set of URLs.

`tornado.autoreload`
~~~~~~~~~~~~~~~~~~~~

- On Python 3, uses ``__main__.__spec__`` to more reliably reconstruct the
  original command line and avoid modifying ``PYTHONPATH``.

- The ``io_loop`` argument to `tornado.autoreload.start` has been removed.

`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

- `tornado.concurrent.Future` is now an alias for `asyncio.Future` when
  running on Python 3. See "Backwards-compatibility notes" for more.

- Setting the result of a ``Future`` no longer blocks while callbacks are
  being run. Instead, the callbacks are scheduled on the next `.IOLoop`
  iteration.

- The deprecated alias ``tornado.concurrent.TracebackFuture`` has been
  removed.

- `tornado.concurrent.chain_future` now works with all three kinds of
  ``Futures`` (Tornado, `asyncio`, and `concurrent.futures`).

- The ``io_loop`` argument to `tornado.concurrent.run_on_executor` has
  been removed.

- New functions `.future_set_result_unless_cancelled`,
  `.future_set_exc_info`, and `.future_add_done_callback` help mask the
  difference between `asyncio.Future` and Tornado's previous ``Future``
  implementation.

`tornado.curl_httpclient`
~~~~~~~~~~~~~~~~~~~~~~~~~

- Improved debug logging on Python 3.

- The ``time_info`` response attribute now includes ``appconnect`` in
  addition to other measurements.

- Closing a `.CurlAsyncHTTPClient` now breaks circular references that
  could delay garbage collection.

- The ``io_loop`` argument to the `.CurlAsyncHTTPClient` constructor has
  been removed.

`tornado.gen`
~~~~~~~~~~~~~

- ``tornado.gen.TimeoutError`` is now an alias for
  `tornado.util.TimeoutError`.

- Leak detection for ``Futures`` created by this module now attributes
  them to their proper caller instead of the coroutine machinery.

- Several circular references that could delay garbage collection have
  been broken up.

- On Python 3, `asyncio.Task` is used instead of the Tornado coroutine
  runner. This improves compatibility with some `asyncio` libraries and
  adds support for cancellation.

- The ``io_loop`` arguments to ``YieldFuture`` and `.with_timeout` have
  been removed.
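As a sketch of the new `tornado.concurrent` helper functions described
above (the callback and result value are illustrative)::

    from tornado.concurrent import (
        Future,
        future_add_done_callback,
        future_set_result_unless_cancelled,
    )

    def start_operation():
        # On Python 3 this is an asyncio.Future, so a current IOLoop
        # (and therefore an asyncio event loop) must exist.
        f = Future()

        def on_done(fut):
            if not fut.cancelled():
                print("result:", fut.result())

        future_add_done_callback(f, on_done)
        # A no-op if a caller has already cancelled f.
        future_set_result_unless_cancelled(f, 42)
        return f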
`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

- The ``io_loop`` argument to all `.AsyncHTTPClient` constructors has been
  removed.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

- It is now possible for a client to reuse a connection after sending a
  chunked request.

- If a client sends a malformed request, the server now responds with a
  400 error instead of simply closing the connection.

- ``Content-Length`` and ``Transfer-Encoding`` headers are no longer sent
  with 1xx or 204 responses (this was already true of 304 responses).

- When closing a connection to an HTTP/1.1 client, the
  ``Connection: close`` header is sent with the response.

- The ``io_loop`` argument to the `.HTTPServer` constructor has been
  removed.

- If more than one ``X-Scheme`` or ``X-Forwarded-Proto`` header is
  present, only the last is used.

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

- The string representation of `.HTTPServerRequest` objects (which are
  sometimes used in log messages) no longer includes the request headers.

- New function `.qs_to_qsl` converts the result of
  `urllib.parse.parse_qs` to name-value pairs.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

- ``tornado.ioloop.TimeoutError`` is now an alias for
  `tornado.util.TimeoutError`.

- `.IOLoop.instance` is now a deprecated alias for `.IOLoop.current`.

- `.IOLoop.install` and `.IOLoop.clear_instance` are deprecated.

- The ``IOLoop.initialized`` method has been removed.

- On Python 3, the `asyncio`-backed `.IOLoop` is always used and
  alternative `.IOLoop` implementations cannot be configured.
  `.IOLoop.current` and related methods pass through to
  `asyncio.get_event_loop`.

- `~.IOLoop.run_sync` cancels its argument on a timeout. This results in
  better stack traces (and avoids log messages about leaks) in native
  coroutines.

- New methods `.IOLoop.run_in_executor` and `.IOLoop.set_default_executor`
  make it easier to run functions in other threads from native coroutines
  (since `concurrent.futures.Future` does not support ``await``).

- ``PollIOLoop`` (the default on Python 2) attempts to detect misuse of
  `.IOLoop` instances across `os.fork`.

- The ``io_loop`` argument to `.PeriodicCallback` has been removed.

- It is now possible to create a `.PeriodicCallback` in one thread and
  start it in another without passing an explicit event loop.

- The ``IOLoop.set_blocking_signal_threshold`` and
  ``IOLoop.set_blocking_log_threshold`` methods are deprecated because
  they are not implemented for the `asyncio` event loop. Use the
  ``PYTHONASYNCIODEBUG=1`` environment variable instead.

- `.IOLoop.clear_current` now works if it is called before any current
  loop is established.

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

- The ``io_loop`` argument to the `.IOStream` constructor has been
  removed.

- New method `.BaseIOStream.read_into` provides a minimal-copy alternative
  to `.BaseIOStream.read_bytes`.

- `.BaseIOStream.write` is now much more efficient for very large amounts
  of data.

- Fixed some cases in which ``IOStream.error`` could be inaccurate.

- Writing a `memoryview` can no longer result in "BufferError: Existing
  exports of data: object cannot be re-sized".

`tornado.locks`
~~~~~~~~~~~~~~~

- As a side effect of the ``Future`` changes, waiters are always notified
  asynchronously with respect to `.Condition.notify`.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

- The default `.Resolver` now uses `.IOLoop.run_in_executor`.
  `.ExecutorResolver`, `.BlockingResolver`, and `.ThreadedResolver` are
  deprecated.

- The ``io_loop`` arguments to `.add_accept_handler`, `.ExecutorResolver`,
  and `.ThreadedResolver` have been removed.
- `.add_accept_handler` returns a callable which can be used to remove all handlers that were added. - `.OverrideResolver` now accepts per-family overrides. `tornado.options` ~~~~~~~~~~~~~~~~~ - Duplicate option names are now detected properly whether they use hyphens or underscores. `tornado.platform.asyncio` ~~~~~~~~~~~~~~~~~~~~~~~~~~ - `.AsyncIOLoop` and `.AsyncIOMainLoop` are now used automatically when appropriate; referencing them explicitly is no longer recommended. - Starting an `.IOLoop` or making it current now also sets the `asyncio` event loop for the current thread. Closing an `.IOLoop` closes the corresponding `asyncio` event loop. - `.to_tornado_future` and `.to_asyncio_future` are deprecated since they are now no-ops. - `~.AnyThreadEventLoopPolicy` can now be used to easily allow the creation of event loops on any thread (similar to Tornado's prior policy). `tornado.platform.caresresolver` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``io_loop`` argument to `.CaresResolver` has been removed. `tornado.platform.twisted` ~~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``io_loop`` arguments to ``TornadoReactor``, `.TwistedResolver`, and ``tornado.platform.twisted.install`` have been removed. `tornado.process` ~~~~~~~~~~~~~~~~~ - The ``io_loop`` argument to the `.Subprocess` constructor and `.Subprocess.initialize` has been removed. `tornado.routing` ~~~~~~~~~~~~~~~~~ - A default 404 response is now generated if no delegate is found for a request. `tornado.simple_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``io_loop`` argument to `.SimpleAsyncHTTPClient` has been removed. - TLS is now configured according to `ssl.create_default_context` by default. `tornado.tcpclient` ~~~~~~~~~~~~~~~~~~~ - The ``io_loop`` argument to the `.TCPClient` constructor has been removed. - `.TCPClient.connect` has a new ``timeout`` argument. `tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ - The ``io_loop`` argument to the `.TCPServer` constructor has been removed. - `.TCPServer` no longer logs ``EBADF`` errors during shutdown. `tornado.testing` ~~~~~~~~~~~~~~~~~ - The deprecated ``tornado.testing.get_unused_port`` and ``tornado.testing.LogTrapTestCase`` have been removed. - `.AsyncHTTPTestCase.fetch` now supports absolute URLs. - `.AsyncHTTPTestCase.fetch` now connects to ``127.0.0.1`` instead of ``localhost`` to be more robust against faulty ipv6 configurations. `tornado.util` ~~~~~~~~~~~~~~ - `tornado.util.TimeoutError` replaces ``tornado.gen.TimeoutError`` and ``tornado.ioloop.TimeoutError``. - `.Configurable` now supports configuration at multiple levels of an inheritance hierarchy. `tornado.web` ~~~~~~~~~~~~~ - `.RequestHandler.set_status` no longer requires that the given status code appear in `http.client.responses`. - It is no longer allowed to send a body with 1xx or 204 responses. - Exception handling now breaks up reference cycles that could delay garbage collection. - `.RedirectHandler` now copies any query arguments from the request to the redirect location. - If both ``If-None-Match`` and ``If-Modified-Since`` headers are present in a request to `.StaticFileHandler`, the latter is now ignored. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ - The C accelerator now operates on multiple bytes at a time to improve performance. - Requests with invalid websocket headers now get a response with status code 400 instead of a closed connection. - `.WebSocketHandler.write_message` now raises `.WebSocketClosedError` if the connection closes while the write is in progress. 
- The ``io_loop`` argument to `.websocket_connect` has been removed. tornado-6.1.0/docs/releases/v5.0.1.rst000066400000000000000000000004271374705040500173100ustar00rootroot00000000000000What's new in Tornado 5.0.1 =========================== Mar 18, 2018 ------------ Bug fix ~~~~~~~ - This release restores support for versions of Python 3.4 prior to 3.4.4. This is important for compatibility with Debian Jessie which has 3.4.2 as its version of Python 3. tornado-6.1.0/docs/releases/v5.0.2.rst000066400000000000000000000011551374705040500173100ustar00rootroot00000000000000What's new in Tornado 5.0.2 =========================== Apr 7, 2018 ----------- Bug fixes ~~~~~~~~~ - Fixed a memory leak when `.IOLoop` objects are created and destroyed. - If `.AsyncTestCase.get_new_ioloop` returns a reference to a preexisting event loop (typically when it has been overridden to return `.IOLoop.current()`), the test's ``tearDown`` method will not close this loop. - Fixed a confusing error message when the synchronous `.HTTPClient` fails to initialize because an event loop is already running. - `.PeriodicCallback` no longer executes twice in a row due to backwards clock adjustments. tornado-6.1.0/docs/releases/v5.1.0.rst000066400000000000000000000172401374705040500173110ustar00rootroot00000000000000What's new in Tornado 5.1 ========================= July 12, 2018 ------------- Deprecation notice ~~~~~~~~~~~~~~~~~~ - Tornado 6.0 will drop support for Python 2.7 and 3.4. The minimum supported Python version will be 3.5.2. - The ``tornado.stack_context`` module is deprecated and will be removed in Tornado 6.0. The reason for this is that it is not feasible to provide this module's semantics in the presence of ``async def`` native coroutines. ``ExceptionStackContext`` is mainly obsolete thanks to coroutines. ``StackContext`` lacks a direct replacement although the new ``contextvars`` package (in the Python standard library beginning in Python 3.7) may be an alternative. - Callback-oriented code often relies on ``ExceptionStackContext`` to handle errors and prevent leaked connections. In order to avoid the risk of silently introducing subtle leaks (and to consolidate all of Tornado's interfaces behind the coroutine pattern), ``callback`` arguments throughout the package are deprecated and will be removed in version 6.0. All functions that had a ``callback`` argument removed now return a `.Future` which should be used instead. - Where possible, deprecation warnings are emitted when any of these deprecated interfaces is used. However, Python does not display deprecation warnings by default. To prepare your application for Tornado 6.0, run Python with the ``-Wd`` argument or set the environment variable ``PYTHONWARNINGS`` to ``d``. If your application runs on Python 3 without deprecation warnings, it should be able to move to Tornado 6.0 without disruption. `tornado.auth` ~~~~~~~~~~~~~~ - `.OAuthMixin._oauth_get_user_future` may now be a native coroutine. - All ``callback`` arguments in this package are deprecated and will be removed in 6.0. Use the coroutine interfaces instead. - The ``OAuthMixin._oauth_get_user`` method is deprecated and will be removed in 6.0. Override `~.OAuthMixin._oauth_get_user_future` instead. `tornado.autoreload` ~~~~~~~~~~~~~~~~~~~~ - The command-line autoreload wrapper is now preserved if an internal autoreload fires. - The command-line wrapper no longer starts duplicated processes on windows when combined with internal autoreload. 
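One way to surface the deprecation warnings mentioned in the notice above,
equivalent in spirit to running ``python -Wd`` (a sketch; the filter scope
is up to you)::

    import warnings

    # DeprecationWarning is hidden by default; the "default" action
    # prints each distinct warning once per location.
    warnings.filterwarnings("default", category=DeprecationWarning)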
`tornado.concurrent` ~~~~~~~~~~~~~~~~~~~~ - `.run_on_executor` now returns `.Future` objects that are compatible with ``await``. - The ``callback`` argument to `.run_on_executor` is deprecated and will be removed in 6.0. - ``return_future`` is deprecated and will be removed in 6.0. `tornado.gen` ~~~~~~~~~~~~~ - Some older portions of this module are deprecated and will be removed in 6.0. This includes ``engine``, ``YieldPoint``, ``Callback``, ``Wait``, ``WaitAll``, ``MultiYieldPoint``, and ``Task``. - Functions decorated with ``@gen.coroutine`` will no longer accept ``callback`` arguments in 6.0. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ - The behavior of ``raise_error=False`` is changing in 6.0. Currently it suppresses all errors; in 6.0 it will only suppress the errors raised due to completed responses with non-200 status codes. - The ``callback`` argument to `.AsyncHTTPClient.fetch` is deprecated and will be removed in 6.0. - `tornado.httpclient.HTTPError` has been renamed to `.HTTPClientError` to avoid ambiguity in code that also has to deal with `tornado.web.HTTPError`. The old name remains as an alias. - ``tornado.curl_httpclient`` now supports non-ASCII characters in username and password arguments. - ``.HTTPResponse.request_time`` now behaves consistently across ``simple_httpclient`` and ``curl_httpclient``, excluding time spent in the ``max_clients`` queue in both cases (previously this time was included in ``simple_httpclient`` but excluded in ``curl_httpclient``). In both cases the time is now computed using a monotonic clock where available. - `.HTTPResponse` now has a ``start_time`` attribute recording a wall-clock (`time.time`) timestamp at which the request started (after leaving the ``max_clients`` queue if applicable). `tornado.httputil` ~~~~~~~~~~~~~~~~~~ - `.parse_multipart_form_data` now recognizes non-ASCII filenames in RFC 2231/5987 (``filename*=``) format. - ``HTTPServerRequest.write`` is deprecated and will be removed in 6.0. Use the methods of ``request.connection`` instead. - Malformed HTTP headers are now logged less noisily. `tornado.ioloop` ~~~~~~~~~~~~~~~~ - `.PeriodicCallback` now supports a ``jitter`` argument to randomly vary the timeout. - ``IOLoop.set_blocking_signal_threshold``, ``IOLoop.set_blocking_log_threshold``, ``IOLoop.log_stack``, and ``IOLoop.handle_callback_exception`` are deprecated and will be removed in 6.0. - Fixed a `KeyError` in `.IOLoop.close` when `.IOLoop` objects are being opened and closed in multiple threads. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ - All ``callback`` arguments in this module are deprecated except for `.BaseIOStream.set_close_callback`. They will be removed in 6.0. - ``streaming_callback`` arguments to `.BaseIOStream.read_bytes` and `.BaseIOStream.read_until_close` are deprecated and will be removed in 6.0. `tornado.netutil` ~~~~~~~~~~~~~~~~~ - Improved compatibility with GNU Hurd. `tornado.options` ~~~~~~~~~~~~~~~~~ - `tornado.options.parse_config_file` now allows setting options to strings (which will be parsed the same way as `tornado.options.parse_command_line`) in addition to the specified type for the option. `tornado.platform.twisted` ~~~~~~~~~~~~~~~~~~~~~~~~~~ - ``TornadoReactor`` and ``TwistedIOLoop`` are deprecated and will be removed in 6.0. Instead, Tornado will always use the asyncio event loop and twisted can be configured to do so as well. ``tornado.stack_context`` ~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``tornado.stack_context`` module is deprecated and will be removed in 6.0. 
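As a sketch of the new ``jitter`` argument to `.PeriodicCallback` mentioned
above (the interval and jitter values are illustrative)::

    from tornado import ioloop

    def heartbeat():
        print("still alive")

    # Randomly perturb each 30-second interval by a fraction of the
    # callback time to avoid synchronized callbacks across processes.
    pc = ioloop.PeriodicCallback(heartbeat, 30 * 1000, jitter=0.1)
    pc.start()
    ioloop.IOLoop.current().start()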
`tornado.testing`
~~~~~~~~~~~~~~~~~

- `.AsyncHTTPTestCase.fetch` now takes a ``raise_error`` argument. This
  argument has the same semantics as `.AsyncHTTPClient.fetch`, but
  defaults to false because tests often need to deal with non-200
  responses (and for backwards-compatibility).

- The `.AsyncTestCase.stop` and `.AsyncTestCase.wait` methods are
  deprecated.

`tornado.web`
~~~~~~~~~~~~~

- New method `.RequestHandler.detach` can be used from methods that are
  not decorated with ``@asynchronous`` (the decorator was required to use
  ``self.request.connection.detach()``).

- `.RequestHandler.finish` and `.RequestHandler.render` now return
  ``Futures`` that can be used to wait for the last part of the response
  to be sent to the client.

- `.FallbackHandler` now calls ``on_finish`` for the benefit of subclasses
  that may have overridden it.

- The ``asynchronous`` decorator is deprecated and will be removed in 6.0.

- The ``callback`` argument to `.RequestHandler.flush` is deprecated and
  will be removed in 6.0.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

- When compression is enabled, memory limits now apply to the
  post-decompression size of the data, protecting against DoS attacks.

- `.websocket_connect` now supports subprotocols.

- `.WebSocketHandler` and `.WebSocketClientConnection` now have
  ``selected_subprotocol`` attributes to see the subprotocol in use.

- The `.WebSocketHandler.select_subprotocol` method is now called with an
  empty list instead of a list containing an empty string if no
  subprotocols were requested by the client.

- `.WebSocketHandler.open` may now be a coroutine.

- The ``data`` argument to `.WebSocketHandler.ping` is now optional.

- Client-side websocket connections no longer buffer more than one message
  in memory at a time.

- Exception logging now uses `.RequestHandler.log_exception`.

`tornado.wsgi`
~~~~~~~~~~~~~~

- ``WSGIApplication`` and ``WSGIAdapter`` are deprecated and will be
  removed in Tornado 6.0.

tornado-6.1.0/docs/releases/v5.1.1.rst

What's new in Tornado 5.1.1
===========================

Sep 16, 2018
------------

Bug fixes
~~~~~~~~~

- Fixed a case in which the `.Future` returned by
  `.RequestHandler.finish` could fail to resolve.

- The `.TwitterMixin.authenticate_redirect` method works again.

- Improved error handling in the `tornado.auth` module, fixing hanging
  requests when a network or other error occurs.

tornado-6.1.0/docs/releases/v6.0.0.rst

What's new in Tornado 6.0
=========================

Mar 1, 2019
-----------

Backwards-incompatible changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- Python 2.7 and 3.4 are no longer supported; the minimum supported Python
  version is 3.5.2.

- APIs deprecated in Tornado 5.1 have been removed. This includes the
  ``tornado.stack_context`` module and most ``callback`` arguments
  throughout the package. All removed APIs emitted `DeprecationWarning`
  when used in Tornado 5.1, so running your application with the ``-Wd``
  Python command-line flag or the environment variable
  ``PYTHONWARNINGS=d`` should tell you whether your application is ready
  to move to Tornado 6.0.

- ``.WebSocketHandler.get`` is now a coroutine and must be called
  accordingly in any subclasses that override this method (but note that
  overriding ``get`` is not recommended; either ``prepare`` or ``open``
  should be used instead).

General changes
~~~~~~~~~~~~~~~

- Tornado now includes type annotations compatible with ``mypy``.
These annotations will be used when type-checking your application with ``mypy``, and may be usable in editors and other tools. - Tornado now uses native coroutines internally, improving performance. `tornado.auth` ~~~~~~~~~~~~~~ - All ``callback`` arguments in this package have been removed. Use the coroutine interfaces instead. - The ``OAuthMixin._oauth_get_user`` method has been removed. Override `~.OAuthMixin._oauth_get_user_future` instead. `tornado.concurrent` ~~~~~~~~~~~~~~~~~~~~ - The ``callback`` argument to `.run_on_executor` has been removed. - ``return_future`` has been removed. `tornado.gen` ~~~~~~~~~~~~~ - Some older portions of this module have been removed. This includes ``engine``, ``YieldPoint``, ``Callback``, ``Wait``, ``WaitAll``, ``MultiYieldPoint``, and ``Task``. - Functions decorated with ``@gen.coroutine`` no longer accept ``callback`` arguments. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ - The behavior of ``raise_error=False`` has changed. Now only suppresses the errors raised due to completed responses with non-200 status codes (previously it suppressed all errors). - The ``callback`` argument to `.AsyncHTTPClient.fetch` has been removed. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ - ``HTTPServerRequest.write`` has been removed. Use the methods of ``request.connection`` instead. - Unrecognized ``Content-Encoding`` values now log warnings only for content types that we would otherwise attempt to parse. `tornado.ioloop` ~~~~~~~~~~~~~~~~ - ``IOLoop.set_blocking_signal_threshold``, ``IOLoop.set_blocking_log_threshold``, ``IOLoop.log_stack``, and ``IOLoop.handle_callback_exception`` have been removed. - Improved performance of `.IOLoop.add_callback`. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ - All ``callback`` arguments in this module have been removed except for `.BaseIOStream.set_close_callback`. - ``streaming_callback`` arguments to `.BaseIOStream.read_bytes` and `.BaseIOStream.read_until_close` have been removed. - Eliminated unnecessary logging of "Errno 0". `tornado.log` ~~~~~~~~~~~~~ - Log files opened by this module are now explicitly set to UTF-8 encoding. `tornado.netutil` ~~~~~~~~~~~~~~~~~ - The results of ``getaddrinfo`` are now sorted by address family to avoid partial failures and deadlocks. `tornado.platform.twisted` ~~~~~~~~~~~~~~~~~~~~~~~~~~ - ``TornadoReactor`` and ``TwistedIOLoop`` have been removed. ``tornado.simple_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The default HTTP client now supports the ``network_interface`` request argument to specify the source IP for the connection. - If a server returns a 3xx response code without a ``Location`` header, the response is raised or returned directly instead of trying and failing to follow the redirect. - When following redirects, methods other than ``POST`` will no longer be transformed into ``GET`` requests. 301 (permanent) redirects are now treated the same way as 302 (temporary) and 303 (see other) redirects in this respect. - Following redirects now works with ``body_producer``. ``tornado.stack_context`` ~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``tornado.stack_context`` module has been removed. `tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ - `.TCPServer.start` now supports a ``max_restarts`` argument (same as `.fork_processes`). `tornado.testing` ~~~~~~~~~~~~~~~~~ - `.AsyncHTTPTestCase` now drops all references to the `.Application` during ``tearDown``, allowing its memory to be reclaimed sooner. 
- `.AsyncTestCase` now cancels all pending coroutines in ``tearDown``, in an effort to reduce warnings from the python runtime about coroutines that were not awaited. Note that this may cause ``asyncio.CancelledError`` to be logged in other places. Coroutines that expect to be running at test shutdown may need to catch this exception. `tornado.web` ~~~~~~~~~~~~~ - The ``asynchronous`` decorator has been removed. - The ``callback`` argument to `.RequestHandler.flush` has been removed. - `.StaticFileHandler` now supports large negative values for the ``Range`` header and returns an appropriate error for ``end > start``. - It is now possible to set ``expires_days`` in ``xsrf_cookie_kwargs``. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ - Pings and other messages sent while the connection is closing are now silently dropped instead of logging exceptions. - Errors raised by ``open()`` are now caught correctly when this method is a coroutine. `tornado.wsgi` ~~~~~~~~~~~~~~ - ``WSGIApplication`` and ``WSGIAdapter`` have been removed. tornado-6.1.0/docs/releases/v6.0.1.rst000066400000000000000000000003121374705040500173020ustar00rootroot00000000000000What's new in Tornado 6.0.1 =========================== Mar 3, 2019 ----------- Bug fixes ~~~~~~~~~ - Fixed issues with type annotations that caused errors while importing Tornado on Python 3.5.2. tornado-6.1.0/docs/releases/v6.0.2.rst000066400000000000000000000005321374705040500173070ustar00rootroot00000000000000What's new in Tornado 6.0.2 =========================== Mar 23, 2019 ------------ Bug fixes ~~~~~~~~~ - `.WebSocketHandler.set_nodelay` works again. - Accessing ``HTTPResponse.body`` now returns an empty byte string instead of raising ``ValueError`` for error responses that don't have a body (it returned None in this case in Tornado 5). tornado-6.1.0/docs/releases/v6.0.3.rst000066400000000000000000000006621374705040500173140ustar00rootroot00000000000000What's new in Tornado 6.0.3 =========================== Jun 22, 2019 ------------ Bug fixes ~~~~~~~~~ - `.gen.with_timeout` always treats ``asyncio.CancelledError`` as a ``quiet_exception`` (this improves compatibility with Python 3.8, which changed ``CancelledError`` to a ``BaseException``). - ``IOStream`` now checks for closed streams earlier, avoiding spurious logged errors in some situations (mainly with websockets). tornado-6.1.0/docs/releases/v6.0.4.rst000066400000000000000000000011741374705040500173140ustar00rootroot00000000000000What's new in Tornado 6.0.4 =========================== Mar 3, 2020 ----------- General changes ~~~~~~~~~~~~~~~ - Binary wheels are now available for Python 3.8 on Windows. Note that it is still necessary to use ``asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())`` for this platform/version. Bug fixes ~~~~~~~~~ - Fixed an issue in `.IOStream` (introduced in 6.0.0) that resulted in ``StreamClosedError`` being incorrectly raised if a stream is closed mid-read but there is enough buffered data to satisfy the read. - `.AnyThreadEventLoopPolicy` now always uses the selector event loop on Windows.tornado-6.1.0/docs/releases/v6.1.0.rst000066400000000000000000000071531374705040500173140ustar00rootroot00000000000000What's new in Tornado 6.1.0 =========================== Oct 30, 2020 ------------ Deprecation notice ~~~~~~~~~~~~~~~~~~ - This is the last release of Tornado to support Python 3.5. Future versions will require Python 3.6 or newer. General changes ~~~~~~~~~~~~~~~ - Windows support has been improved. 
Tornado is now compatible with the proactor event loop (which became the default in Python 3.8) by automatically falling back to running a selector in a second thread. This means that it is no longer necessary to explicitly configure a selector event loop, although doing so may improve performance. This does not change the fact that Tornado is significantly less scalable on Windows than on other platforms. - Binary wheels are now provided for Windows, MacOS, and Linux (amd64 and arm64). `tornado.gen` ~~~~~~~~~~~~~ - `.coroutine` now has better support for the Python 3.7+ ``contextvars`` module. In particular, the ``ContextVar.reset`` method is now supported. `tornado.http1connection` ~~~~~~~~~~~~~~~~~~~~~~~~~ - ``HEAD`` requests to handlers that used chunked encoding no longer produce malformed output. - Certain kinds of malformed ``gzip`` data no longer cause an infinite loop. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ - Setting ``decompress_response=False`` now works correctly with ``curl_httpclient``. - Mixing requests with and without proxies works correctly in ``curl_httpclient`` (assuming the version of pycurl is recent enough). - A default ``User-Agent`` of ``Tornado/$VERSION`` is now used if the ``user_agent`` parameter is not specified. - After a 303 redirect, ``tornado.simple_httpclient`` always uses ``GET``. Previously this would use ``GET`` if the original request was a ``POST`` and would otherwise reuse the original request method. For ``curl_httpclient``, the behavior depends on the version of ``libcurl`` (with the most recent versions using ``GET`` after 303 regardless of the original method). - Setting ``request_timeout`` and/or ``connect_timeout`` to zero is now supported to disable the timeout. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ - Header parsing is now faster. - `.parse_body_arguments` now accepts incompletely-escaped non-ASCII inputs. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ - `ssl.CertificateError` during the SSL handshake is now handled correctly. - Reads that are resolved while the stream is closing are now handled correctly. `tornado.log` ~~~~~~~~~~~~~ - When colored logging is enabled, ``logging.CRITICAL`` messages are now recognized and colored magenta. `tornado.netutil` ~~~~~~~~~~~~~~~~~ - ``EADDRNOTAVAIL`` is now ignored when binding to ``localhost`` with IPv6. This error is common in docker. `tornado.platform.asyncio` ~~~~~~~~~~~~~~~~~~~~~~~~~~ - `.AnyThreadEventLoopPolicy` now also configures a selector event loop for these threads (the proactor event loop only works on the main thread) ``tornado.platform.auto`` ~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``set_close_exec`` function has been removed. `tornado.testing` ~~~~~~~~~~~~~~~~~ - `.ExpectLog` now has a ``level`` argument to ensure that the given log level is enabled. `tornado.web` ~~~~~~~~~~~~~ - ``RedirectHandler.get`` now accepts keyword arguments. - When sending 304 responses, more headers (including ``Allow``) are now preserved. - ``reverse_url`` correctly handles escaped characters in the regex route. - Default ``Etag`` headers are now generated with SHA-512 instead of MD5. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ - The ``ping_interval`` timer is now stopped when the connection is closed. - `.websocket_connect` now raises an error when it encounters a redirect instead of hanging. 
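As a sketch of the explicit selector configuration mentioned above
(optional in 6.1, where the fallback is automatic, but required on earlier
releases with Python 3.8 on Windows)::

    import asyncio
    import sys

    if sys.platform == "win32":
        # The proactor event loop (the Python 3.8 default on Windows)
        # lacks the APIs Tornado needs; choose the selector loop instead.
        asyncio.set_event_loop_policy(
            asyncio.WindowsSelectorEventLoopPolicy())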
tornado-6.1.0/docs/requirements.txt000066400000000000000000000001101374705040500173750ustar00rootroot00000000000000sphinx>1.8.2,<2.1 sphinxcontrib-asyncio==0.2.0 sphinx_rtd_theme Twisted tornado-6.1.0/docs/routing.rst000066400000000000000000000002301374705040500163350ustar00rootroot00000000000000``tornado.routing`` --- Basic routing implementation ==================================================== .. automodule:: tornado.routing :members: tornado-6.1.0/docs/tcpclient.rst000066400000000000000000000002421374705040500166360ustar00rootroot00000000000000``tornado.tcpclient`` --- `.IOStream` connection factory ======================================================== .. automodule:: tornado.tcpclient :members: tornado-6.1.0/docs/tcpserver.rst000066400000000000000000000002531374705040500166700ustar00rootroot00000000000000``tornado.tcpserver`` --- Basic `.IOStream`-based TCP server ============================================================ .. automodule:: tornado.tcpserver :members: tornado-6.1.0/docs/template.rst000066400000000000000000000010421374705040500164630ustar00rootroot00000000000000``tornado.template`` --- Flexible output generation =================================================== .. automodule:: tornado.template Class reference --------------- .. autoclass:: Template(template_string, name="", loader=None, compress_whitespace=None, autoescape="xhtml_escape", whitespace=None) :members: .. autoclass:: BaseLoader :members: .. autoclass:: Loader :members: .. autoclass:: DictLoader :members: .. autoexception:: ParseError .. autofunction:: filter_whitespace tornado-6.1.0/docs/testing.rst000066400000000000000000000012621374705040500163310ustar00rootroot00000000000000``tornado.testing`` --- Unit testing support for asynchronous code ================================================================== .. automodule:: tornado.testing Asynchronous test cases ----------------------- .. autoclass:: AsyncTestCase :members: .. autoclass:: AsyncHTTPTestCase :members: .. autoclass:: AsyncHTTPSTestCase :members: .. autofunction:: gen_test Controlling log output ---------------------- .. autoclass:: ExpectLog :members: Test runner ----------- .. autofunction:: main Helper functions ---------------- .. autofunction:: bind_unused_port .. 
autofunction:: get_async_test_timeout

tornado-6.1.0/docs/tornado.png (binary image data omitted)

tornado-6.1.0/docs/twisted.rst

``tornado.platform.twisted`` --- Bridges between Twisted and Tornado
========================================================================

.. automodule:: tornado.platform.twisted

Twisted DNS resolver
--------------------

.. autoclass:: TwistedResolver
   :members:

tornado-6.1.0/docs/util.rst

``tornado.util`` --- General-purpose utilities
==============================================

.. testsetup::

   from tornado.util import *

.. automodule:: tornado.util
   :members:

tornado-6.1.0/docs/utilities.rst

Utilities
=========

.. toctree::

   autoreload
   concurrent
   log
   options
   testing
   util

tornado-6.1.0/docs/web.rst

``tornado.web`` --- ``RequestHandler`` and ``Application`` classes
==================================================================

.. testsetup::

   from tornado.web import *

..
automodule:: tornado.web Request handlers ---------------- .. autoclass:: RequestHandler(...) Entry points ^^^^^^^^^^^^ .. automethod:: RequestHandler.initialize .. automethod:: RequestHandler.prepare .. automethod:: RequestHandler.on_finish .. _verbs: Implement any of the following methods (collectively known as the HTTP verb methods) to handle the corresponding HTTP method. These methods can be made asynchronous with the ``async def`` keyword or `.gen.coroutine` decorator. The arguments to these methods come from the `.URLSpec`: Any capturing groups in the regular expression become arguments to the HTTP verb methods (keyword arguments if the group is named, positional arguments if it's unnamed). To support a method not on this list, override the class variable ``SUPPORTED_METHODS``:: class WebDAVHandler(RequestHandler): SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('PROPFIND',) def propfind(self): pass .. automethod:: RequestHandler.get .. automethod:: RequestHandler.head .. automethod:: RequestHandler.post .. automethod:: RequestHandler.delete .. automethod:: RequestHandler.patch .. automethod:: RequestHandler.put .. automethod:: RequestHandler.options Input ^^^^^ The ``argument`` methods provide support for HTML form-style arguments. These methods are available in both singular and plural forms because HTML forms are ambiguous and do not distinguish between a singular argument and a list containing one entry. If you wish to use other formats for arguments (for example, JSON), parse ``self.request.body`` yourself:: def prepare(self): if self.request.headers['Content-Type'] == 'application/x-json': self.args = json_decode(self.request.body) # Access self.args directly instead of using self.get_argument. .. automethod:: RequestHandler.get_argument(name: str, default: Union[None, str, RAISE] = RAISE, strip: bool = True) -> Optional[str] .. automethod:: RequestHandler.get_arguments .. automethod:: RequestHandler.get_query_argument(name: str, default: Union[None, str, RAISE] = RAISE, strip: bool = True) -> Optional[str] .. automethod:: RequestHandler.get_query_arguments .. automethod:: RequestHandler.get_body_argument(name: str, default: Union[None, str, RAISE] = RAISE, strip: bool = True) -> Optional[str] .. automethod:: RequestHandler.get_body_arguments .. automethod:: RequestHandler.decode_argument .. attribute:: RequestHandler.request The `tornado.httputil.HTTPServerRequest` object containing additional request parameters including e.g. headers and body data. .. attribute:: RequestHandler.path_args .. attribute:: RequestHandler.path_kwargs The ``path_args`` and ``path_kwargs`` attributes contain the positional and keyword arguments that are passed to the :ref:`HTTP verb methods <verbs>`. These attributes are set before those methods are called, so the values are available during `prepare`. .. automethod:: RequestHandler.data_received Output ^^^^^^ .. automethod:: RequestHandler.set_status .. automethod:: RequestHandler.set_header .. automethod:: RequestHandler.add_header .. automethod:: RequestHandler.clear_header .. automethod:: RequestHandler.set_default_headers .. automethod:: RequestHandler.write .. automethod:: RequestHandler.flush .. automethod:: RequestHandler.finish .. automethod:: RequestHandler.render .. automethod:: RequestHandler.render_string .. automethod:: RequestHandler.get_template_namespace .. automethod:: RequestHandler.redirect .. automethod:: RequestHandler.send_error .. automethod:: RequestHandler.write_error .. automethod:: RequestHandler.clear ..
automethod:: RequestHandler.render_linked_js .. automethod:: RequestHandler.render_embed_js .. automethod:: RequestHandler.render_linked_css .. automethod:: RequestHandler.render_embed_css Cookies ^^^^^^^ .. autoattribute:: RequestHandler.cookies .. automethod:: RequestHandler.get_cookie .. automethod:: RequestHandler.set_cookie .. automethod:: RequestHandler.clear_cookie .. automethod:: RequestHandler.clear_all_cookies .. automethod:: RequestHandler.get_secure_cookie .. automethod:: RequestHandler.get_secure_cookie_key_version .. automethod:: RequestHandler.set_secure_cookie .. automethod:: RequestHandler.create_signed_value .. autodata:: MIN_SUPPORTED_SIGNED_VALUE_VERSION .. autodata:: MAX_SUPPORTED_SIGNED_VALUE_VERSION .. autodata:: DEFAULT_SIGNED_VALUE_VERSION .. autodata:: DEFAULT_SIGNED_VALUE_MIN_VERSION Other ^^^^^ .. attribute:: RequestHandler.application The `Application` object serving this request .. automethod:: RequestHandler.check_etag_header .. automethod:: RequestHandler.check_xsrf_cookie .. automethod:: RequestHandler.compute_etag .. automethod:: RequestHandler.create_template_loader .. autoattribute:: RequestHandler.current_user .. automethod:: RequestHandler.detach .. automethod:: RequestHandler.get_browser_locale .. automethod:: RequestHandler.get_current_user .. automethod:: RequestHandler.get_login_url .. automethod:: RequestHandler.get_status .. automethod:: RequestHandler.get_template_path .. automethod:: RequestHandler.get_user_locale .. autoattribute:: RequestHandler.locale .. automethod:: RequestHandler.log_exception .. automethod:: RequestHandler.on_connection_close .. automethod:: RequestHandler.require_setting .. automethod:: RequestHandler.reverse_url .. automethod:: RequestHandler.set_etag_header .. autoattribute:: RequestHandler.settings .. automethod:: RequestHandler.static_url .. automethod:: RequestHandler.xsrf_form_html .. autoattribute:: RequestHandler.xsrf_token Application configuration ------------------------- .. autoclass:: Application(handlers: Optional[List[Union[Rule, Tuple]]] = None, default_host: Optional[str] = None, transforms: Optional[List[Type[OutputTransform]]] = None, **settings) .. attribute:: settings Additional keyword arguments passed to the constructor are saved in the `settings` dictionary, and are often referred to in documentation as "application settings". Settings are used to customize various aspects of Tornado (although in some cases richer customization is possible by overriding methods in a subclass of `RequestHandler`). Some applications also like to use the `settings` dictionary as a way to make application-specific settings available to handlers without using global variables. Settings used in Tornado are described below. General settings: * ``autoreload``: If ``True``, the server process will restart when any source files change, as described in :ref:`debug-mode`. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. * ``debug``: Shorthand for several debug mode settings, described in :ref:`debug-mode`. Setting ``debug=True`` is equivalent to ``autoreload=True``, ``compiled_template_cache=False``, ``static_hash_cache=False``, ``serve_traceback=True``. * ``default_handler_class`` and ``default_handler_args``: This handler will be used if no other match is found; use this to implement custom 404 pages (new in Tornado 3.2). * ``compress_response``: If ``True``, responses in textual formats will be compressed automatically. New in Tornado 4.0. 
* ``gzip``: Deprecated alias for ``compress_response`` since Tornado 4.0. * ``log_function``: This function will be called at the end of every request to log the result (with one argument, the `RequestHandler` object). The default implementation writes to the `logging` module's root logger. May also be customized by overriding `Application.log_request`. * ``serve_traceback``: If ``True``, the default error page will include the traceback of the error. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. * ``ui_modules`` and ``ui_methods``: May be set to a mapping of `UIModule` or UI methods to be made available to templates. May be set to a module, dictionary, or a list of modules and/or dicts. See :ref:`ui-modules` for more details. * ``websocket_ping_interval``: If set to a number, all websockets will be pinged every n seconds. This can help keep the connection alive through certain proxy servers which close idle connections, and it can detect if the websocket has failed without being properly closed. * ``websocket_ping_timeout``: If the ping interval is set, and the server doesn't receive a 'pong' in this many seconds, it will close the websocket. The default is three times the ping interval, with a minimum of 30 seconds. Ignored if the ping interval is not set. Authentication and security settings: * ``cookie_secret``: Used by `RequestHandler.get_secure_cookie` and `.set_secure_cookie` to sign cookies. * ``key_version``: Used by `RequestHandler.set_secure_cookie` to sign cookies with a specific key when ``cookie_secret`` is a key dictionary. * ``login_url``: The `authenticated` decorator will redirect to this url if the user is not logged in. Can be further customized by overriding `RequestHandler.get_login_url`. * ``xsrf_cookies``: If ``True``, :ref:`xsrf` will be enabled. * ``xsrf_cookie_version``: Controls the version of new XSRF cookies produced by this server. Should generally be left at the default (which will always be the highest supported version), but may be set to a lower value temporarily during version transitions. New in Tornado 3.2.2, which introduced XSRF cookie version 2. * ``xsrf_cookie_kwargs``: May be set to a dictionary of additional arguments to be passed to `.RequestHandler.set_cookie` for the XSRF cookie. * ``twitter_consumer_key``, ``twitter_consumer_secret``, ``friendfeed_consumer_key``, ``friendfeed_consumer_secret``, ``google_consumer_key``, ``google_consumer_secret``, ``facebook_api_key``, ``facebook_secret``: Used in the `tornado.auth` module to authenticate to various APIs. Template settings: * ``autoescape``: Controls automatic escaping for templates. May be set to ``None`` to disable escaping, or to the *name* of a function that all output should be passed through. Defaults to ``"xhtml_escape"``. Can be changed on a per-template basis with the ``{% autoescape %}`` directive. * ``compiled_template_cache``: Default is ``True``; if ``False`` templates will be recompiled on every request. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. * ``template_path``: Directory containing template files. Can be further customized by overriding `RequestHandler.get_template_path`. * ``template_loader``: Assign to an instance of `tornado.template.BaseLoader` to customize template loading. If this setting is used the ``template_path`` and ``autoescape`` settings are ignored. Can be further customized by overriding `RequestHandler.create_template_loader`.
* ``template_whitespace``: Controls handling of whitespace in templates; see `tornado.template.filter_whitespace` for allowed values. New in Tornado 4.3. Static file settings: * ``static_hash_cache``: Default is ``True``; if ``False`` static urls will be recomputed on every request. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. * ``static_path``: Directory from which static files will be served. * ``static_url_prefix``: Url prefix for static files, defaults to ``"/static/"``. * ``static_handler_class``, ``static_handler_args``: May be set to use a different handler for static files instead of the default `tornado.web.StaticFileHandler`. ``static_handler_args``, if set, should be a dictionary of keyword arguments to be passed to the handler's ``initialize`` method. .. automethod:: Application.listen .. automethod:: Application.add_handlers(handlers: List[Union[Rule, Tuple]]) .. automethod:: Application.get_handler_delegate .. automethod:: Application.reverse_url .. automethod:: Application.log_request .. autoclass:: URLSpec The ``URLSpec`` class is also available under the name ``tornado.web.url``. Decorators ---------- .. autofunction:: authenticated .. autofunction:: addslash .. autofunction:: removeslash .. autofunction:: stream_request_body Everything else --------------- .. autoexception:: HTTPError .. autoexception:: Finish .. autoexception:: MissingArgumentError .. autoclass:: UIModule :members: .. autoclass:: ErrorHandler .. autoclass:: FallbackHandler .. autoclass:: RedirectHandler .. autoclass:: StaticFileHandler :members: tornado-6.1.0/docs/webframework.rst000066400000000000000000000001521374705040500173440ustar00rootroot00000000000000Web framework ============= .. toctree:: web template routing escape locale websocket tornado-6.1.0/docs/websocket.rst000066400000000000000000000022441374705040500166430ustar00rootroot00000000000000``tornado.websocket`` --- Bidirectional communication to the browser ==================================================================== .. testsetup:: import tornado.websocket .. automodule:: tornado.websocket .. autoclass:: WebSocketHandler Event handlers -------------- .. automethod:: WebSocketHandler.open .. automethod:: WebSocketHandler.on_message .. automethod:: WebSocketHandler.on_close .. automethod:: WebSocketHandler.select_subprotocol .. autoattribute:: WebSocketHandler.selected_subprotocol .. automethod:: WebSocketHandler.on_ping Output ------ .. automethod:: WebSocketHandler.write_message .. automethod:: WebSocketHandler.close Configuration ------------- .. automethod:: WebSocketHandler.check_origin .. automethod:: WebSocketHandler.get_compression_options .. automethod:: WebSocketHandler.set_nodelay Other ----- .. automethod:: WebSocketHandler.ping .. automethod:: WebSocketHandler.on_pong .. autoexception:: WebSocketClosedError Client-side support ------------------- .. autofunction:: websocket_connect .. autoclass:: WebSocketClientConnection :members: tornado-6.1.0/docs/wsgi.rst000066400000000000000000000003551374705040500156270ustar00rootroot00000000000000``tornado.wsgi`` --- Interoperability with other Python frameworks and servers ============================================================================== .. automodule:: tornado.wsgi .. 
autoclass:: WSGIContainer :members: tornado-6.1.0/maint/000077500000000000000000000000001374705040500143015ustar00rootroot00000000000000tornado-6.1.0/maint/README000066400000000000000000000002411374705040500151560ustar00rootroot00000000000000This directory contains tools and scripts that are used in the development and maintenance of Tornado itself, but are probably not of interest to Tornado users. tornado-6.1.0/maint/benchmark/000077500000000000000000000000001374705040500162335ustar00rootroot00000000000000tornado-6.1.0/maint/benchmark/benchmark.py000077500000000000000000000045771374705040500205470ustar00rootroot00000000000000#!/usr/bin/env python # # A simple benchmark of tornado's HTTP stack. # Requires 'ab' to be installed. # # Running without profiling: # maint/benchmark/benchmark.py # maint/benchmark/benchmark.py --quiet --num_runs=5|grep "Requests per second" # # Running with profiling: # # python -m cProfile -o /tmp/prof maint/benchmark/benchmark.py # python -m pstats /tmp/prof # % sort time # % stats 20 from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.web import RequestHandler, Application import random import signal import subprocess try: xrange except NameError: xrange = range # choose a random port to avoid colliding with TIME_WAIT sockets left over # from previous runs. define("min_port", type=int, default=8000) define("max_port", type=int, default=9000) # Increasing --n without --keepalive will eventually run into problems # due to TIME_WAIT sockets define("n", type=int, default=15000) define("c", type=int, default=25) define("keepalive", type=bool, default=False) define("quiet", type=bool, default=False) # Repeat the entire benchmark this many times (on different ports) # This gives JITs time to warm up, etc.
Pypy needs 3-5 runs at # --n=15000 for its JIT to reach full effectiveness define("num_runs", type=int, default=1) define("ioloop", type=str, default=None) class RootHandler(RequestHandler): def get(self): self.write("Hello, world") def _log(self): pass def handle_sigchld(sig, frame): IOLoop.current().add_callback_from_signal(IOLoop.current().stop) def main(): parse_command_line() if options.ioloop: IOLoop.configure(options.ioloop) for i in xrange(options.num_runs): run() def run(): io_loop = IOLoop(make_current=True) app = Application([("/", RootHandler)]) port = random.randrange(options.min_port, options.max_port) app.listen(port, address='127.0.0.1') signal.signal(signal.SIGCHLD, handle_sigchld) args = ["ab"] args.extend(["-n", str(options.n)]) args.extend(["-c", str(options.c)]) if options.keepalive: args.append("-k") if options.quiet: # just stops the progress messages printed to stderr args.append("-q") args.append("http://127.0.0.1:%d/" % port) subprocess.Popen(args) io_loop.start() io_loop.close() io_loop.clear_current() if __name__ == '__main__': main() tornado-6.1.0/maint/benchmark/chunk_benchmark.py000077500000000000000000000031261374705040500217340ustar00rootroot00000000000000#!/usr/bin/env python # # Downloads a large file in chunked encoding with both curl and simple clients import logging from tornado.curl_httpclient import CurlAsyncHTTPClient from tornado.simple_httpclient import SimpleAsyncHTTPClient from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.web import RequestHandler, Application try: xrange except NameError: xrange = range define('port', default=8888) define('num_chunks', default=1000) define('chunk_size', default=2048) class ChunkHandler(RequestHandler): def get(self): for i in xrange(options.num_chunks): self.write('A' * options.chunk_size) self.flush() self.finish() def main(): parse_command_line() app = Application([('/', ChunkHandler)]) app.listen(options.port, address='127.0.0.1') def callback(response): response.rethrow() assert len(response.body) == (options.num_chunks * options.chunk_size) logging.warning("fetch completed in %s seconds", response.request_time) IOLoop.current().stop() logging.warning("Starting fetch with curl client") curl_client = CurlAsyncHTTPClient() curl_client.fetch('http://localhost:%d/' % options.port, callback=callback) IOLoop.current().start() logging.warning("Starting fetch with simple client") simple_client = SimpleAsyncHTTPClient() simple_client.fetch('http://localhost:%d/' % options.port, callback=callback) IOLoop.current().start() if __name__ == '__main__': main() tornado-6.1.0/maint/benchmark/gen_benchmark.py000077500000000000000000000022531374705040500213750ustar00rootroot00000000000000#!/usr/bin/env python # # A simple benchmark of the tornado.gen module. # Runs in two modes, testing new-style (@coroutine and Futures) # and old-style (@engine and Tasks) coroutines. from timeit import Timer from tornado import gen from tornado.options import options, define, parse_command_line define('num', default=10000, help='number of iterations') # These benchmarks are delicate. They hit various fast-paths in the gen # machinery in order to stay synchronous so we don't need an IOLoop. # This removes noise from the results, but it's easy to change things # in a way that completely invalidates the results. 
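# Example invocation (a sketch: the path assumes you run from the repo root,
# and --num is the option defined above):
#   python maint/benchmark/gen_benchmark.py --num=20000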
@gen.engine def e2(callback): callback() @gen.engine def e1(): for i in range(10): yield gen.Task(e2) @gen.coroutine def c2(): pass @gen.coroutine def c1(): for i in range(10): yield c2() def main(): parse_command_line() t = Timer(e1) results = t.timeit(options.num) / options.num print('engine: %0.3f ms per iteration' % (results * 1000)) t = Timer(c1) results = t.timeit(options.num) / options.num print('coroutine: %0.3f ms per iteration' % (results * 1000)) if __name__ == '__main__': main() tornado-6.1.0/maint/benchmark/parsing_benchmark.py000066400000000000000000000060151374705040500222640ustar00rootroot00000000000000#!/usr/bin/env python import re import timeit from enum import Enum from typing import Callable from tornado.httputil import HTTPHeaders from tornado.options import define, options, parse_command_line define("benchmark", type=str) define("num_runs", type=int, default=1) _CRLF_RE = re.compile(r"\r?\n") _TEST_HEADERS = ( "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp," "image/apng,*/*;q=0.8,application/signed-exchange;v=b3\r\n" "Accept-Encoding: gzip, deflate, br\r\n" "Accept-Language: ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7\r\n" "Cache-Control: max-age=0\r\n" "Connection: keep-alive\r\n" "Host: example.com\r\n" "Upgrade-Insecure-Requests: 1\r\n" "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\r\n" ) def headers_split_re(headers: str) -> None: for line in _CRLF_RE.split(headers): pass def headers_split_simple(headers: str) -> None: for line in headers.split("\n"): if line.endswith("\r"): line = line[:-1] def headers_parse_re(headers: str) -> HTTPHeaders: h = HTTPHeaders() for line in _CRLF_RE.split(headers): if line: h.parse_line(line) return h def headers_parse_simple(headers: str) -> HTTPHeaders: h = HTTPHeaders() for line in headers.split("\n"): if line.endswith("\r"): line = line[:-1] if line: h.parse_line(line) return h def run_headers_split(): regex_time = timeit.timeit(lambda: headers_split_re(_TEST_HEADERS), number=100000) print("regex", regex_time) simple_time = timeit.timeit( lambda: headers_split_simple(_TEST_HEADERS), number=100000 ) print("str.split", simple_time) print("speedup", regex_time / simple_time) def run_headers_full(): regex_time = timeit.timeit(lambda: headers_parse_re(_TEST_HEADERS), number=10000) print("regex", regex_time) simple_time = timeit.timeit( lambda: headers_parse_simple(_TEST_HEADERS), number=10000 ) print("str.split", simple_time) print("speedup", regex_time / simple_time) class Benchmark(Enum): def __new__(cls, arg_value: str, func: Callable[[], None]): member = object.__new__(cls) member._value_ = arg_value member.func = func return member HEADERS_SPLIT = ("headers-split", run_headers_split) HEADERS_FULL = ("headers-full", run_headers_full) def main(): parse_command_line() try: func = Benchmark(options.benchmark).func except ValueError: known_benchmarks = [benchmark.value for benchmark in Benchmark] print( "Unknown benchmark: '{}', supported values are: {}" .format(options.benchmark, ", ".join(known_benchmarks)) ) return for _ in range(options.num_runs): func() if __name__ == '__main__': main() tornado-6.1.0/maint/benchmark/template_benchmark.py000077500000000000000000000030741374705040500224410ustar00rootroot00000000000000#!/usr/bin/env python # # A simple benchmark of tornado template rendering, based on # https://github.com/mitsuhiko/jinja2/blob/master/examples/bench.py import sys from timeit import Timer from tornado.options 
import options, define, parse_command_line from tornado.template import Template define('num', default=100, help='number of iterations') define('dump', default=False, help='print template generated code and exit') context = { 'page_title': 'mitsuhiko\'s benchmark', 'table': [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10) for x in range(1000)] } tmpl = Template("""\
<html>
  <head>
    <title>{{ page_title }}</title>
  </head>
  <body>
    <h1>{{ page_title }}</h1>
    <table>
      {% for row in table %}
      <tr>
        {% for cell in row %}
        <td>{{ cell }}</td>
        {% end %}
      </tr>
      {% end %}
    </table>
  </body>
</html>
\ """) def render(): tmpl.generate(**context) def main(): parse_command_line() if options.dump: print(tmpl.code) sys.exit(0) t = Timer(render) results = t.timeit(options.num) / options.num print('%0.3f ms per iteration' % (results * 1000)) if __name__ == '__main__': main() tornado-6.1.0/maint/circlerefs/000077500000000000000000000000001374705040500164225ustar00rootroot00000000000000tornado-6.1.0/maint/circlerefs/circlerefs.py000077500000000000000000000057551374705040500211340ustar00rootroot00000000000000#!/usr/bin/env python """Test script to find circular references. Circular references are not leaks per se, because they will eventually be GC'd. However, on CPython, they prevent the reference-counting fast path from being used and instead rely on the slower full GC. This increases memory footprint and CPU overhead, so we try to eliminate circular references created by normal operation. """ import gc import traceback import types from tornado import web, ioloop, gen, httpclient def find_circular_references(garbage=None): def inner(level): for item in level: item_id = id(item) if item_id not in garbage_ids: continue if item_id in visited_ids: continue if item_id in stack_ids: candidate = stack[stack.index(item):] candidate.append(item) found.append(candidate) continue stack.append(item) stack_ids.add(item_id) inner(gc.get_referents(item)) stack.pop() stack_ids.remove(item_id) visited_ids.add(item_id) garbage = garbage or gc.garbage found = [] stack = [] stack_ids = set() garbage_ids = set(map(id, garbage)) visited_ids = set() inner(garbage) inner = None return found class CollectHandler(web.RequestHandler): @gen.coroutine def get(self): self.write("Collected: {}\n".format(gc.collect())) self.write("Garbage: {}\n".format(len(gc.garbage))) for circular in find_circular_references(): print('\n==========\n Circular \n==========') for item in circular: print(' ', repr(item)) for item in circular: if isinstance(item, types.FrameType): print('\nLocals:', item.f_locals) print('\nTraceback:', repr(item)) traceback.print_stack(item) class DummyHandler(web.RequestHandler): @gen.coroutine def get(self): self.write('ok\n') class DummyAsyncHandler(web.RequestHandler): @gen.coroutine def get(self): raise web.Finish('ok\n') application = web.Application([ (r'/dummy/', DummyHandler), (r'/dummyasync/', DummyAsyncHandler), (r'/collect/', CollectHandler), ], debug=True) @gen.coroutine def main(): gc.disable() gc.collect() gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_SAVEALL) print('GC disabled') print("Start on 8888") application.listen(8888, '127.0.0.1') # Do a little work. Alternately, could leave this script running and # poke at it with a browser. client = httpclient.AsyncHTTPClient() yield client.fetch('http://127.0.0.1:8888/dummy/') yield client.fetch('http://127.0.0.1:8888/dummyasync/', raise_error=False) # Now report on the results. resp = yield client.fetch('http://127.0.0.1:8888/collect/') print(resp.body) if __name__ == "__main__": ioloop.IOLoop.current().run_sync(main) tornado-6.1.0/maint/requirements.in000066400000000000000000000007731374705040500173630ustar00rootroot00000000000000# Requirements for tools used in the development of tornado. # Use virtualenv instead of venv; tox seems to get confused otherwise. # # maint/requirements.txt contains the pinned versions of all direct and # indirect dependencies; this file only contains direct dependencies # and is useful for upgrading. 
# Tornado's optional dependencies Twisted pycares pycurl # Other useful tools Sphinx>1.8.2 black coverage flake8 mypy==0.630 pep8 pyflakes sphinxcontrib-asyncio sphinx-rtd-theme tox twine virtualenv tornado-6.1.0/maint/requirements.txt000066400000000000000000000021531374705040500175660ustar00rootroot00000000000000alabaster==0.7.12 appdirs==1.4.3 attrs==19.1.0 automat==0.7.0 babel==2.6.0 black==19.3b0 bleach==3.1.1 certifi==2019.3.9 cffi==1.12.3 chardet==3.0.4 click==7.0 constantly==15.1.0 coverage==4.5.3 docutils==0.14 entrypoints==0.3 filelock==3.0.10 flake8==3.7.7 hyperlink==19.0.0 idna==2.8 imagesize==1.1.0 incremental==17.5.0 jinja2==2.10.1 markupsafe==1.1.1 mccabe==0.6.1 mypy-extensions==0.4.1 mypy==0.630 packaging==19.0 pep8==1.7.1 pkginfo==1.5.0.1 pluggy==0.9.0 py==1.8.0 pycares==3.0.0 pycodestyle==2.5.0 pycparser==2.19 pycurl==7.43.0.2 pyflakes==2.1.1 pygments==2.3.1 pyhamcrest==1.9.0 pyparsing==2.4.0 pytz==2019.1 readme-renderer==24.0 requests-toolbelt==0.9.1 requests==2.21.0 six==1.12.0 snowballstemmer==1.2.1 sphinx-rtd-theme==0.4.3 sphinx==2.0.1 sphinxcontrib-applehelp==1.0.1 sphinxcontrib-asyncio==0.2.0 sphinxcontrib-devhelp==1.0.1 sphinxcontrib-htmlhelp==1.0.2 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.2 sphinxcontrib-serializinghtml==1.1.3 toml==0.10.0 tox==3.9.0 tqdm==4.31.1 twine==1.13.0 twisted==19.7.0 typed-ast==1.1.2 urllib3==1.24.3 virtualenv==16.5.0 webencodings==0.5.1 zope.interface==4.6.0 tornado-6.1.0/maint/scripts/000077500000000000000000000000001374705040500157705ustar00rootroot00000000000000tornado-6.1.0/maint/scripts/custom_fixers/000077500000000000000000000000001374705040500206625ustar00rootroot00000000000000tornado-6.1.0/maint/scripts/custom_fixers/__init__.py000066400000000000000000000000001374705040500227610ustar00rootroot00000000000000tornado-6.1.0/maint/scripts/custom_fixers/fix_future_imports.py000066400000000000000000000041771374705040500252020ustar00rootroot00000000000000"""Updates all source files to import the same set of __future__ directives. """ from lib2to3 import fixer_base from lib2to3 import pytree from lib2to3.pgen2 import token from lib2to3.fixer_util import FromImport, Name, Comma, Newline # copied from fix_tuple_params.py def is_docstring(stmt): return isinstance(stmt, pytree.Node) and stmt.children[0].type == token.STRING class FixFutureImports(fixer_base.BaseFix): BM_compatible = True PATTERN = """import_from< 'from' module_name="__future__" 'import' any >""" def start_tree(self, tree, filename): self.found_future_import = False def new_future_import(self, old): new = FromImport("__future__", [Name("absolute_import", prefix=" "), Comma(), Name("division", prefix=" "), Comma(), Name("print_function", prefix=" ")]) if old is not None: new.prefix = old.prefix return new def transform(self, node, results): self.found_future_import = True return self.new_future_import(node) def finish_tree(self, tree, filename): if self.found_future_import: return if not isinstance(tree, pytree.Node): # Empty files (usually __init__.py) show up as a single Leaf # instead of a Node, so leave them alone return first_stmt = tree.children[0] if is_docstring(first_stmt): # Skip a line and add the import after the docstring tree.insert_child(1, Newline()) pos = 2 elif first_stmt.prefix: # No docstring, but an initial comment (perhaps a #! line). # Transfer the initial comment to a new blank line. 
newline = Newline() newline.prefix = first_stmt.prefix first_stmt.prefix = "" tree.insert_child(0, newline) pos = 1 else: # No comments or docstring, just insert at the start pos = 0 tree.insert_child(pos, self.new_future_import(None)) tree.insert_child(pos + 1, Newline()) # terminates the import stmt tornado-6.1.0/maint/scripts/custom_fixers/fix_unicode_literal.py000066400000000000000000000006271374705040500252510ustar00rootroot00000000000000from lib2to3 import fixer_base from lib2to3.fixer_util import String class FixUnicodeLiteral(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'u' trailer< '(' arg=any ')' > > """ def transform(self, node, results): arg = results["arg"] node.replace(String('u' + arg.value, prefix=node.prefix)) tornado-6.1.0/maint/scripts/download_wheels.py000077500000000000000000000022461374705040500215270ustar00rootroot00000000000000#!/usr/bin/env python3 import asyncio import json import pathlib import sys from tornado.httpclient import AsyncHTTPClient BASE_URL = "https://ci.appveyor.com/api" async def fetch_job(directory, job): http = AsyncHTTPClient() artifacts = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts") paths = [pathlib.PurePosixPath(a["fileName"]) for a in json.loads(artifacts.body)] for path in paths: artifact = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts/{path}") with open(directory.joinpath(path.name), "wb") as f: f.write(artifact.body) async def main(): http = AsyncHTTPClient() try: _, version = sys.argv except ValueError: print("usage: maint/scripts/download_wheels.py v6.0.1", file=sys.stderr) sys.exit(1) directory = pathlib.Path(f"downloads-{version}") directory.mkdir(exist_ok=True) build = await http.fetch(f"{BASE_URL}/projects/bdarnell/tornado/branch/{version}") jobs = [job["jobId"] for job in json.loads(build.body)["build"]["jobs"]] await asyncio.gather(*(fetch_job(directory, job) for job in jobs)) if __name__ == "__main__": asyncio.run(main()) tornado-6.1.0/maint/scripts/run_autopep8.sh000077500000000000000000000010261374705040500207570ustar00rootroot00000000000000#!/bin/sh # Runs autopep8 in the configuration used for tornado. # # W602 is "deprecated form of raising exception", but the fix is incorrect # (and I'm not sure if the three-argument form of raise is really deprecated # in the first place) # E501 is "line longer than 80 chars" but the automated fix is ugly. # E301 adds a blank line between docstring and first method # E309 adds a blank line between class declaration and docstring (?) 
autopep8 --ignore=W602,E501,E301,E309 -i tornado/*.py tornado/platform/*.py tornado/test/*.py tornado-6.1.0/maint/scripts/run_fixers.py000077500000000000000000000002561374705040500205340ustar00rootroot00000000000000#!/usr/bin/env python # Usage is like 2to3: # $ maint/scripts/run_fixers.py -wn --no-diffs tornado import sys from lib2to3.main import main sys.exit(main("custom_fixers")) tornado-6.1.0/maint/scripts/test_resolvers.py000077500000000000000000000026551374705040500214400ustar00rootroot00000000000000#!/usr/bin/env python import pprint import socket from tornado import gen from tornado.ioloop import IOLoop from tornado.netutil import Resolver, ThreadedResolver from tornado.options import parse_command_line, define, options try: import twisted except ImportError: twisted = None try: import pycares except ImportError: pycares = None define('family', default='unspec', help='Address family to query: unspec, inet, or inet6') @gen.coroutine def main(): args = parse_command_line() if not args: args = ['localhost', 'www.google.com', 'www.facebook.com', 'www.dropbox.com'] resolvers = [Resolver(), ThreadedResolver()] if twisted is not None: from tornado.platform.twisted import TwistedResolver resolvers.append(TwistedResolver()) if pycares is not None: from tornado.platform.caresresolver import CaresResolver resolvers.append(CaresResolver()) family = { 'unspec': socket.AF_UNSPEC, 'inet': socket.AF_INET, 'inet6': socket.AF_INET6, }[options.family] for host in args: print('Resolving %s' % host) for resolver in resolvers: addrinfo = yield resolver.resolve(host, 80, family) print('%s: %s' % (resolver.__class__.__name__, pprint.pformat(addrinfo))) print() if __name__ == '__main__': IOLoop.instance().run_sync(main) tornado-6.1.0/maint/test/000077500000000000000000000000001374705040500152605ustar00rootroot00000000000000tornado-6.1.0/maint/test/README000066400000000000000000000002711374705040500161400ustar00rootroot00000000000000This directory contains additional tests that are not included in the main suite (because e.g. they have extra dependencies, run slowly, or produce more output than a simple pass/fail) tornado-6.1.0/maint/test/cython/000077500000000000000000000000001374705040500165645ustar00rootroot00000000000000tornado-6.1.0/maint/test/cython/.gitignore000066400000000000000000000000361374705040500205530ustar00rootroot00000000000000.eggs cythonapp.egg-info dist tornado-6.1.0/maint/test/cython/MANIFEST.in000066400000000000000000000000261374705040500203200ustar00rootroot00000000000000include cythonapp.pyx tornado-6.1.0/maint/test/cython/cythonapp.pyx000066400000000000000000000014141374705040500213330ustar00rootroot00000000000000import cython from tornado import gen import pythonmodule async def native_coroutine(): x = await pythonmodule.hello() if x != "hello": raise ValueError("expected hello, got %r" % x) return "goodbye" @gen.coroutine def decorated_coroutine(): x = yield pythonmodule.hello() if x != "hello": raise ValueError("expected hello, got %r" % x) return "goodbye" # The binding directive is necessary for compatibility with # ArgReplacer (and therefore return_future), but only because # this is a static function. @cython.binding(True) def function_with_args(one, two, three): return (one, two, three) class AClass: # methods don't need the binding directive. 
def method_with_args(one, two, three): return (one, two, three) tornado-6.1.0/maint/test/cython/cythonapp_test.py000066400000000000000000000022541374705040500222050ustar00rootroot00000000000000from tornado.testing import AsyncTestCase, gen_test from tornado.util import ArgReplacer import unittest import cythonapp class CythonCoroutineTest(AsyncTestCase): @gen_test def test_native_coroutine(self): x = yield cythonapp.native_coroutine() self.assertEqual(x, "goodbye") @gen_test def test_decorated_coroutine(self): x = yield cythonapp.decorated_coroutine() self.assertEqual(x, "goodbye") class CythonArgReplacerTest(unittest.TestCase): def test_arg_replacer_function(self): replacer = ArgReplacer(cythonapp.function_with_args, 'two') args = (1, 'old', 3) kwargs = {} self.assertEqual(replacer.get_old_value(args, kwargs), 'old') self.assertEqual(replacer.replace('new', args, kwargs), ('old', [1, 'new', 3], {})) def test_arg_replacer_method(self): replacer = ArgReplacer(cythonapp.AClass().method_with_args, 'two') args = (1, 'old', 3) kwargs = {} self.assertEqual(replacer.get_old_value(args, kwargs), 'old') self.assertEqual(replacer.replace('new', args, kwargs), ('old', [1, 'new', 3], {})) tornado-6.1.0/maint/test/cython/pythonmodule.py000066400000000000000000000001571374705040500216700ustar00rootroot00000000000000from tornado import gen @gen.coroutine def hello(): yield gen.sleep(0.001) raise gen.Return("hello") tornado-6.1.0/maint/test/cython/setup.py000066400000000000000000000005251374705040500203000ustar00rootroot00000000000000from setuptools import setup try: import Cython.Build except: Cython = None if Cython is None: ext_modules = None else: ext_modules = Cython.Build.cythonize('cythonapp.pyx') setup( name='cythonapp', py_modules=['cythonapp_test', 'pythonmodule'], ext_modules=ext_modules, setup_requires='Cython>=0.23.1', ) tornado-6.1.0/maint/test/cython/tox.ini000066400000000000000000000006221374705040500200770ustar00rootroot00000000000000[tox] # This currently segfaults on pypy. envlist = py27,py35,py36 [testenv] deps = ../../.. Cython>=0.23.3 backports_abc>=0.4 singledispatch commands = python -m unittest cythonapp_test # Most of these are defaults, but if you specify any you can't fall back # defaults for the others. basepython = py27: python2.7 py35: python3.5 py36: python3.6 tornado-6.1.0/maint/test/mypy/000077500000000000000000000000001374705040500162565ustar00rootroot00000000000000tornado-6.1.0/maint/test/mypy/.gitignore000066400000000000000000000000211374705040500202370ustar00rootroot00000000000000UNKNOWN.egg-info tornado-6.1.0/maint/test/mypy/bad.py000066400000000000000000000002221374705040500173520ustar00rootroot00000000000000from tornado.web import RequestHandler class MyHandler(RequestHandler): def get(self) -> str: # Deliberate type error return "foo" tornado-6.1.0/maint/test/mypy/good.py000066400000000000000000000003571374705040500175650ustar00rootroot00000000000000from tornado import gen from tornado.web import RequestHandler class MyHandler(RequestHandler): def get(self) -> None: self.write("foo") async def post(self) -> None: await gen.sleep(1) self.write("foo") tornado-6.1.0/maint/test/mypy/setup.py000066400000000000000000000000461374705040500177700ustar00rootroot00000000000000from setuptools import setup setup() tornado-6.1.0/maint/test/mypy/tox.ini000066400000000000000000000004561374705040500175760ustar00rootroot00000000000000# Test that the py.typed marker file is respected and client # application code can be typechecked using tornado's published # annotations. 
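# A typical invocation of this check (assumed, not documented here):
#   cd maint/test/mypy && tox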
[tox] envlist = py37 [testenv] deps = ../../.. mypy whitelist_externals = /bin/sh commands = mypy good.py /bin/sh -c '! mypy bad.py' tornado-6.1.0/maint/test/redbot/000077500000000000000000000000001374705040500165375ustar00rootroot00000000000000tornado-6.1.0/maint/test/redbot/README000066400000000000000000000005111374705040500174140ustar00rootroot00000000000000Redbot is an HTTP validator that checks for common problems, especially related to cacheability. These tests ensure that Tornado's default behavior is correct (but note that this guarantee does not automatically extend to applications built on Tornado since application behavior can impact cacheability. http://redbot.org/abouttornado-6.1.0/maint/test/redbot/red_test.py000077500000000000000000000211451374705040500207300ustar00rootroot00000000000000#!/usr/bin/env python import logging from redbot.resource import HttpResource import redbot.speak as rs import thor import threading from tornado import gen from tornado.options import parse_command_line from tornado.testing import AsyncHTTPTestCase from tornado.web import RequestHandler, Application, asynchronous import unittest class HelloHandler(RequestHandler): def get(self): self.write("Hello world") class RedirectHandler(RequestHandler): def get(self, path): self.redirect(path, status=int(self.get_argument('status', '302'))) class PostHandler(RequestHandler): def post(self): assert self.get_argument('foo') == 'bar' self.redirect('/hello', status=303) class ChunkedHandler(RequestHandler): @asynchronous @gen.engine def get(self): self.write('hello ') yield gen.Task(self.flush) self.write('world') yield gen.Task(self.flush) self.finish() class CacheHandler(RequestHandler): def get(self, computed_etag): self.write(computed_etag) def compute_etag(self): return self._write_buffer[0] class TestMixin(object): def get_handlers(self): return [ ('/hello', HelloHandler), ('/redirect(/.*)', RedirectHandler), ('/post', PostHandler), ('/chunked', ChunkedHandler), ('/cache/(.*)', CacheHandler), ] def get_app_kwargs(self): return dict(static_path='.') def get_allowed_warnings(self): return [ # We can't set a non-heuristic freshness at the framework level, # so just ignore this warning rs.FRESHNESS_HEURISTIC, # For our small test responses the Content-Encoding header # wipes out any gains from compression rs.CONNEG_GZIP_BAD, ] def get_allowed_errors(self): return [] def check_url(self, path, method='GET', body=None, headers=None, expected_status=200, allowed_warnings=None, allowed_errors=None): url = self.get_url(path) red = self.run_redbot(url, method, body, headers) if not red.response.complete: if isinstance(red.response.http_error, Exception): logging.warning((red.response.http_error.desc, vars(red.response.http_error), url)) raise red.response.http_error.res_error else: raise Exception("unknown error; incomplete response") self.assertEqual(int(red.response.status_code), expected_status) allowed_warnings = (allowed_warnings or []) + self.get_allowed_warnings() allowed_errors = (allowed_errors or []) + self.get_allowed_errors() errors = [] warnings = [] for msg in red.response.notes: if msg.level == 'bad': logger = logging.error if not isinstance(msg, tuple(allowed_errors)): errors.append(msg) elif msg.level == 'warning': logger = logging.warning if not isinstance(msg, tuple(allowed_warnings)): warnings.append(msg) elif msg.level in ('good', 'info', 'uri'): logger = logging.info else: raise Exception('unknown level' + msg.level) logger('%s: %s (%s)', msg.category, msg.show_summary('en'), 
msg.__class__.__name__) logger(msg.show_text('en')) self.assertEqual(len(warnings) + len(errors), 0, 'Had %d unexpected warnings and %d errors' % (len(warnings), len(errors))) def run_redbot(self, url, method, body, headers): red = HttpResource(url, method=method, req_body=body, req_hdrs=headers) def work(): red.run(thor.stop) thor.run() self.io_loop.add_callback(self.stop) thread = threading.Thread(target=work) thread.start() self.wait() thread.join() return red def test_hello(self): self.check_url('/hello') def test_static(self): # TODO: 304 responses SHOULD return the same etag that a full # response would. We currently do for If-None-Match, but not # for If-Modified-Since (because IMS does not otherwise # require us to read the file from disk) self.check_url('/static/red_test.py', allowed_warnings=[rs.MISSING_HDRS_304]) def test_static_versioned_url(self): self.check_url('/static/red_test.py?v=1234', allowed_warnings=[rs.MISSING_HDRS_304]) def test_redirect(self): self.check_url('/redirect/hello', expected_status=302) def test_permanent_redirect(self): self.check_url('/redirect/hello?status=301', expected_status=301) def test_404(self): self.check_url('/404', expected_status=404) def test_post(self): body = 'foo=bar' # Without an explicit Content-Length redbot will try to send the # request chunked. self.check_url( '/post', method='POST', body=body, headers=[('Content-Length', str(len(body))), ('Content-Type', 'application/x-www-form-urlencoded')], expected_status=303) def test_chunked(self): self.check_url('/chunked') def test_strong_etag_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_multiple_strong_etag_match(self): computed_etag = '"xyzzy1"' etags = '"xyzzy1", "xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_strong_etag_not_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy1"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) def test_multiple_strong_etag_not_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy1", "xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) def test_wildcard_etag(self): computed_etag = '"xyzzy"' etags = '*' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304, allowed_warnings=[rs.MISSING_HDRS_304]) def test_weak_etag_match(self): computed_etag = '"xyzzy1"' etags = 'W/"xyzzy1"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_multiple_weak_etag_match(self): computed_etag = '"xyzzy2"' etags = 'W/"xyzzy1", W/"xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_weak_etag_not_match(self): computed_etag = '"xyzzy2"' etags = 'W/"xyzzy1"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) def test_multiple_weak_etag_not_match(self): computed_etag = '"xyzzy3"' etags = 'W/"xyzzy1", W/"xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) class DefaultHTTPTest(AsyncHTTPTestCase, TestMixin): def get_app(self): return Application(self.get_handlers(), **self.get_app_kwargs()) 
class GzipHTTPTest(AsyncHTTPTestCase, TestMixin): def get_app(self): return Application(self.get_handlers(), gzip=True, **self.get_app_kwargs()) def get_allowed_errors(self): return super().get_allowed_errors() + [ # TODO: The Etag is supposed to change when Content-Encoding is # used. This should be fixed, but it's difficult to do with the # way GZipContentEncoding fits into the pipeline, and in practice # it doesn't seem likely to cause any problems as long as we're # using the correct Vary header. rs.VARY_ETAG_DOESNT_CHANGE, ] if __name__ == '__main__': parse_command_line() unittest.main() tornado-6.1.0/maint/test/redbot/tox.ini000066400000000000000000000003231374705040500200500ustar00rootroot00000000000000[tox] envlist = py27 setupdir=../../.. [testenv] commands = python red_test.py deps = # Newer versions of thor have a bug with redbot (5/18/13) thor==0.2.0 git+https://github.com/mnot/redbot.git tornado-6.1.0/maint/test/websocket/000077500000000000000000000000001374705040500172465ustar00rootroot00000000000000tornado-6.1.0/maint/test/websocket/.gitignore000066400000000000000000000000111374705040500212260ustar00rootroot00000000000000reports/ tornado-6.1.0/maint/test/websocket/client.py000066400000000000000000000025271374705040500211040ustar00rootroot00000000000000import logging from tornado import gen from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.websocket import websocket_connect define('url', default='ws://localhost:9001') define('name', default='Tornado') @gen.engine def run_tests(): url = options.url + '/getCaseCount' control_ws = yield websocket_connect(url, None) num_tests = int((yield control_ws.read_message())) logging.info('running %d cases', num_tests) msg = yield control_ws.read_message() assert msg is None for i in range(1, num_tests + 1): logging.info('running test case %d', i) url = options.url + '/runCase?case=%d&agent=%s' % (i, options.name) test_ws = yield websocket_connect(url, None, compression_options={}) while True: message = yield test_ws.read_message() if message is None: break test_ws.write_message(message, binary=isinstance(message, bytes)) url = options.url + '/updateReports?agent=%s' % options.name update_ws = yield websocket_connect(url, None) msg = yield update_ws.read_message() assert msg is None IOLoop.instance().stop() def main(): parse_command_line() IOLoop.instance().add_callback(run_tests) IOLoop.instance().start() if __name__ == '__main__': main() tornado-6.1.0/maint/test/websocket/fuzzingclient.json000066400000000000000000000010321374705040500230300ustar00rootroot00000000000000{ "options": {"failByDrop": false}, "outdir": "./reports/servers", "servers": [ {"agent": "Tornado/py27", "url": "ws://localhost:9001", "options": {"version": 18}}, {"agent": "Tornado/py35", "url": "ws://localhost:9002", "options": {"version": 18}}, {"agent": "Tornado/pypy", "url": "ws://localhost:9003", "options": {"version": 18}} ], "cases": ["*"], "exclude-cases": ["9.*", "12.*.1","12.2.*", "12.3.*", "12.4.*", "12.5.*", "13.*.1"], "exclude-agent-cases": {} } tornado-6.1.0/maint/test/websocket/fuzzingserver.json000066400000000000000000000004131374705040500230620ustar00rootroot00000000000000 { "url": "ws://localhost:9001", "options": {"failByDrop": false}, "outdir": "./reports/clients", "webport": 8080, "cases": ["*"], "exclude-cases": ["9.*", "12.*.1","12.2.*", "12.3.*", "12.4.*", "12.5.*", "13.*.1"], "exclude-agent-cases": {} } 
tornado-6.1.0/maint/test/websocket/run-client.sh000077500000000000000000000005411374705040500216650ustar00rootroot00000000000000#!/bin/sh set -e tox .tox/py27/bin/wstest -m fuzzingserver & FUZZING_SERVER_PID=$! sleep 1 .tox/py27/bin/python client.py --name='Tornado/py27' .tox/py35/bin/python client.py --name='Tornado/py35' .tox/pypy/bin/python client.py --name='Tornado/pypy' kill $FUZZING_SERVER_PID wait echo "Tests complete. Output is in ./reports/clients/index.html" tornado-6.1.0/maint/test/websocket/run-server.sh000077500000000000000000000014271374705040500217210ustar00rootroot00000000000000#!/bin/sh # # Runs the autobahn websocket conformance test against tornado in both # python2 and python3. Output goes in ./reports/servers/index.html. # # The --cases and --exclude arguments can be used to run only part of # the suite. The default is --exclude="9.*" to skip the relatively slow # performance tests; pass --exclude="" to override and include them. set -e # build/update the virtualenvs tox .tox/py27/bin/python server.py --port=9001 & PY27_SERVER_PID=$! .tox/py35/bin/python server.py --port=9002 & PY35_SERVER_PID=$! .tox/pypy/bin/python server.py --port=9003 & PYPY_SERVER_PID=$! sleep 1 .tox/py27/bin/wstest -m fuzzingclient kill $PY27_SERVER_PID kill $PY35_SERVER_PID kill $PYPY_SERVER_PID wait echo "Tests complete. Output is in ./reports/servers/index.html" tornado-6.1.0/maint/test/websocket/server.py000066400000000000000000000011371374705040500211300ustar00rootroot00000000000000from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.websocket import WebSocketHandler from tornado.web import Application define('port', default=9000) class EchoHandler(WebSocketHandler): def on_message(self, message): self.write_message(message, binary=isinstance(message, bytes)) def get_compression_options(self): return {} if __name__ == '__main__': parse_command_line() app = Application([ ('/', EchoHandler), ]) app.listen(options.port, address='127.0.0.1') IOLoop.instance().start() tornado-6.1.0/maint/test/websocket/tox.ini000066400000000000000000000004441374705040500205630ustar00rootroot00000000000000# We don't actually use tox to run this test, but it's the easiest way # to install autobahn and build the speedups module. # See run.sh for the real test runner. [tox] envlist = py27, py35, pypy setupdir=../../.. [testenv] commands = python -c pass [testenv:py27] deps = autobahntestsuite tornado-6.1.0/maint/vm/000077500000000000000000000000001374705040500147235ustar00rootroot00000000000000tornado-6.1.0/maint/vm/README000066400000000000000000000016431374705040500156070ustar00rootroot00000000000000This directory contains virtual machine setup scripts for testing Tornado. Requirements: Vagrant (http://vagrantup.com) and VirtualBox (http://virtualbox.org). Vagrant provides an easy download for Ubuntu images, base images for other platforms are harder to find and can be built with VeeWee (https://github.com/jedi4ever/veewee). Usage: cd to the appropriate directory and run `vagrant up`, then `vagrant ssh`. From there, simply run `tox` to run the full test suite, or cd to /tornado and test manually. Afterwards, use `vagrant suspend` or `vagrant destroy` to clean up. Notes: Python distutils (and therefore tox) assume that if the platform supports hard links, they can be used in the Tornado source directory. VirtualBox's shared folder filesystem does not support hard links (or symlinks), so we have to use NFS shared folders instead. 
(which has the unfortunate side effect of requiring sudo on the host machine) tornado-6.1.0/maint/vm/freebsd/000077500000000000000000000000001374705040500163355ustar00rootroot00000000000000tornado-6.1.0/maint/vm/freebsd/Vagrantfile000066400000000000000000000015471374705040500205310ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! VAGRANTFILE_API_VERSION = "2" Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.box = "chef/freebsd-10.0" config.vm.network "private_network", type: "dhcp" # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. config.vm.synced_folder "../../..", "/tornado", type: "nfs" # Override the default /vagrant mapping to use nfs, since freebsd doesn't # support other folder types. config.vm.synced_folder ".", "/vagrant", type: "nfs" config.ssh.shell = "/bin/sh" config.vm.provision :shell, :path => "setup.sh" end tornado-6.1.0/maint/vm/freebsd/setup.sh000066400000000000000000000003551374705040500200340ustar00rootroot00000000000000#!/bin/sh chsh -s bash vagrant PACKAGES=" curl python python34 py27-pip py27-virtualenv " PIP_PACKAGES=" futures pycurl tox " ASSUME_ALWAYS_YES=true pkg install $PACKAGES pip install $PIP_PACKAGES /tornado/maint/vm/shared-setup.sh tornado-6.1.0/maint/vm/freebsd/tox.ini000066400000000000000000000004751374705040500176560ustar00rootroot00000000000000[tox] envlist=py27-full, py27, py34 setupdir=/tornado # /home is a symlink to /usr/home, but tox doesn't like symlinks here toxworkdir=/usr/home/vagrant/tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] # twisted's tests fail on freebsd deps = futures pycurl tornado-6.1.0/maint/vm/shared-setup.sh000077500000000000000000000004251374705040500176670ustar00rootroot00000000000000#!/bin/sh # Run at the end of each vm's provisioning script set -e # Link tox.ini into the home directory so you can run tox immediately # after ssh'ing in without cd'ing to /vagrant (since cd'ing to /tornado # gets the wrong config) ln -sf /vagrant/tox.ini ~vagrant/tox.ini tornado-6.1.0/maint/vm/ubuntu12.04/000077500000000000000000000000001374705040500166325ustar00rootroot00000000000000tornado-6.1.0/maint/vm/ubuntu12.04/Vagrantfile000066400000000000000000000004651374705040500210240ustar00rootroot00000000000000Vagrant::Config.run do |config| config.vm.box = "precise64" config.vm.box_url = "http://files.vagrantup.com/precise64.box" config.vm.network :hostonly, "172.19.1.5" config.vm.share_folder("tornado", "/tornado", "../../..", :nfs=> true) config.vm.provision :shell, :path => "setup.sh" endtornado-6.1.0/maint/vm/ubuntu12.04/setup.sh000066400000000000000000000014531374705040500203310ustar00rootroot00000000000000#!/bin/sh set -e apt-get update # libcurl4-gnutls-dev is the default if you ask for libcurl4-dev, but it # has bugs that make our tests deadlock (the relevant tests detect this and # disable themselves, but it means that to get full coverage we have to use # the openssl version). # The oddly-named python-software-properties includes add-apt-repository. APT_PACKAGES=" python-pip python-dev libcurl4-openssl-dev python-software-properties " apt-get -y install $APT_PACKAGES # Ubuntu 12.04 has python 2.7 as default; install more from here. 
add-apt-repository ppa:fkrull/deadsnakes apt-get update DEADSNAKES_PACKAGES=" python3.5 python3.5-dev " apt-get -y install $DEADSNAKES_PACKAGES PIP_PACKAGES=" futures pycurl tox twisted virtualenv " pip install $PIP_PACKAGES /tornado/maint/vm/shared-setup.sh tornado-6.1.0/maint/vm/ubuntu12.04/tox.ini000066400000000000000000000012451374705040500201470ustar00rootroot00000000000000[tox] envlist = py27-full, py27, py27-select, py27-twisted setupdir=/tornado toxworkdir=/home/vagrant/tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] basepython = python2.7 deps = futures pycurl twisted==12.2.0 [testenv:py27-select] basepython = python2.7 deps = futures pycurl twisted==12.2.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.select.SelectIOLoop {posargs:} [testenv:py27-twisted] basepython = python2.7 deps = futures pycurl twisted==12.2.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.twisted.TwistedIOLoop {posargs:} tornado-6.1.0/maint/vm/ubuntu14.04/000077500000000000000000000000001374705040500166345ustar00rootroot00000000000000tornado-6.1.0/maint/vm/ubuntu14.04/Vagrantfile000066400000000000000000000003701374705040500210210ustar00rootroot00000000000000Vagrant::Config.run do |config| config.vm.box = "ubuntu/trusty64" config.vm.network :hostonly, "172.19.1.8" config.vm.share_folder("tornado", "/tornado", "../../..", :nfs=> true) config.vm.provision :shell, :path => "setup.sh" endtornado-6.1.0/maint/vm/ubuntu14.04/setup.sh000066400000000000000000000010541374705040500203300ustar00rootroot00000000000000#!/bin/sh set -e apt-get update # libcurl4-gnutls-dev is the default if you ask for libcurl4-dev, but it # has bugs that make our tests deadlock (the relevant tests detect this and # disable themselves, but it means that to get full coverage we have to use # the openssl version). APT_PACKAGES=" python-pip python-dev python3-pycurl libcurl4-openssl-dev " apt-get -y install $APT_PACKAGES # Ubuntu 14.04 includes python 2.7 and 3.4. PIP_PACKAGES=" futures pycurl tox twisted virtualenv " pip install $PIP_PACKAGES /tornado/maint/vm/shared-setup.sh tornado-6.1.0/maint/vm/ubuntu14.04/tox.ini000066400000000000000000000013111374705040500201430ustar00rootroot00000000000000[tox] envlist = py27-full, py34, py27, py27-select, py27-twisted setupdir=/tornado toxworkdir=/home/vagrant/tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] basepython = python2.7 deps = futures mock pycurl twisted==14.0.0 [testenv:py27-select] basepython = python2.7 deps = futures mock pycurl twisted==14.0.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.select.SelectIOLoop {posargs:} [testenv:py27-twisted] basepython = python2.7 deps = futures mock pycurl twisted==14.0.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.twisted.TwistedIOLoop {posargs:} tornado-6.1.0/maint/vm/windows/000077500000000000000000000000001374705040500164155ustar00rootroot00000000000000tornado-6.1.0/maint/vm/windows/bootstrap.py000077500000000000000000000062761374705040500210220ustar00rootroot00000000000000#!/usr/bin/env python r"""Installs files needed for tornado testing on windows. These instructions are compatible with the VMs provided by http://modern.ie. The bootstrapping script works on the WinXP/IE6 and Win8/IE10 configurations, although tornado's tests do not pass on XP. 
1) Install virtualbox guest additions (from the device menu in virtualbox) 2) Set up a shared folder to the root of your tornado repo. It must be a read-write mount to use tox, although the tests can be run directly in a read-only mount. This will probably assign drive letter E:. 3) Install Python 2.7 from python.org. 4) Run this script by double-clicking it, or running "c:\python27\python.exe bootstrap.py" in a shell. To run the tests by hand, cd to e:\ and run c:\python27\python.exe -m tornado.test.runtests To run the tests with tox, cd to e:\maint\vm\windows and run c:\python27\scripts\tox To run under cygwin (which must be installed separately), run cd /cygdrive/e; python -m tornado.test.runtests """ import os import subprocess import sys import urllib TMPDIR = r'c:\tornado_bootstrap' PYTHON_VERSIONS = [ (r'c:\python27\python.exe', 'http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi'), (r'c:\python36\python.exe', 'http://www.python.org/ftp/python/3.6.0/python-3.6.0.msi'), ] SCRIPTS_DIR = r'c:\python27\scripts' EASY_INSTALL = os.path.join(SCRIPTS_DIR, 'easy_install.exe') PY_PACKAGES = ['tox', 'virtualenv', 'pip'] def download_to_cache(url, local_name=None): if local_name is None: local_name = url.split('/')[-1] filename = os.path.join(TMPDIR, local_name) if not os.path.exists(filename): data = urllib.urlopen(url).read() with open(filename, 'wb') as f: f.write(data) return filename def main(): if not os.path.exists(TMPDIR): os.mkdir(TMPDIR) os.chdir(TMPDIR) for exe, url in PYTHON_VERSIONS: if os.path.exists(exe): print("%s already exists, skipping" % exe) continue print("Installing %s" % url) filename = download_to_cache(url) # http://blog.jaraco.com/2012/01/how-i-install-python-on-windows.html subprocess.check_call(['msiexec', '/i', filename, 'ALLUSERS=1', '/passive']) if not os.path.exists(EASY_INSTALL): filename = download_to_cache('http://python-distribute.org/distribute_setup.py') subprocess.check_call([sys.executable, filename]) subprocess.check_call([EASY_INSTALL] + PY_PACKAGES) # cygwin's setup.exe doesn't like being run from a script (looks # UAC-related). If it did, something like this might install it. # (install python, python-setuptools, python3, and easy_install # unittest2 (cygwin's python 2 is 2.6)) #filename = download_to_cache('http://cygwin.com/setup.exe') #CYGTMPDIR = os.path.join(TMPDIR, 'cygwin') #if not os.path.exists(CYGTMPDIR): # os.mkdir(CYGTMPDIR) ## http://www.jbmurphy.com/2011/06/16/powershell-script-to-install-cygwin/ #CYGWIN_ARGS = [filename, '-q', '-l', CYGTMPDIR, # '-s', 'http://mirror.nyi.net/cygwin/', '-R', r'c:\cygwin'] #subprocess.check_call(CYGWIN_ARGS) if __name__ == '__main__': main() tornado-6.1.0/maint/vm/windows/tox.ini000066400000000000000000000012071374705040500177300ustar00rootroot00000000000000[tox] envlist = py27-full, py27, py36, py27-opt, py36-monotonic setupdir = e:\ toxworkdir = c:\tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] basepython = python2.7 deps = futures mock [testenv:py36] # TODO: still needed? # tox's path mappings haven't been updated for py33 yet. 
basepython = c:\python36\python.exe [testenv:py36-monotonic] basepython = c:\python36\python.exe commands = python -m tornado.test.runtests --ioloop_time_monotonic {posargs:} [testenv:py27-opt] basepython = python2.7 deps = futures mock commands = python -O -m tornado.test.runtests {posargs:} tornado-6.1.0/runtests.sh000077500000000000000000000007771374705040500154320ustar00rootroot00000000000000#!/bin/sh # Run the Tornado test suite. # # Also consider using tox, which uses virtualenv to run the test suite # under multiple versions of python. cd $(dirname $0) # "python -m" differs from "python tornado/test/runtests.py" in how it sets # up the default python path. "python -m" uses the current directory, # while "python file.py" uses the directory containing "file.py" (which is # not what you want if file.py appears within a package you want to import # from) python -m tornado.test.runtests "$@" tornado-6.1.0/setup.cfg000066400000000000000000000005651374705040500150200ustar00rootroot00000000000000[metadata] license_file = LICENSE [mypy] python_version = 3.5 no_implicit_optional = True [mypy-tornado.*,tornado.platform.*] disallow_untyped_defs = True # It's generally too tedious to require type annotations in tests, but # we do want to type check them as much as type inference allows. [mypy-tornado.test.*] disallow_untyped_defs = False check_untyped_defs = True tornado-6.1.0/setup.py000066400000000000000000000135161374705040500147110ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # type: ignore import os import platform import sys import warnings try: # Use setuptools if available, for install_requires (among other things). import setuptools from setuptools import setup except ImportError: setuptools = None from distutils.core import setup from distutils.core import Extension # The following code is copied from # https://github.com/mongodb/mongo-python-driver/blob/master/setup.py # to support installing without the extension on platforms where # no compiler is available. from distutils.command.build_ext import build_ext class custom_build_ext(build_ext): """Allow C extension building to fail. The C extension speeds up websocket masking, but is not essential. """ warning_message = """ ******************************************************************** WARNING: %s could not be compiled. No C extensions are essential for Tornado to run, although they do result in significant speed improvements for websockets. %s Here are some hints for popular operating systems: If you are seeing this message on Linux you probably need to install GCC and/or the Python development package for your version of Python. 
Debian and Ubuntu users should issue the following command: $ sudo apt-get install build-essential python-dev RedHat and CentOS users should issue the following command: $ sudo yum install gcc python-devel Fedora users should issue the following command: $ sudo dnf install gcc python-devel MacOS users should run: $ xcode-select --install ******************************************************************** """ def run(self): try: build_ext.run(self) except Exception: e = sys.exc_info()[1] sys.stdout.write("%s\n" % str(e)) warnings.warn( self.warning_message % ( "Extension modules", "There was an issue with " "your platform configuration" " - see above.", ) ) def build_extension(self, ext): name = ext.name try: build_ext.build_extension(self, ext) except Exception: e = sys.exc_info()[1] sys.stdout.write("%s\n" % str(e)) warnings.warn( self.warning_message % ( "The %s extension " "module" % (name,), "The output above " "this warning shows how " "the compilation " "failed.", ) ) kwargs = {} with open("tornado/__init__.py") as f: ns = {} exec(f.read(), ns) version = ns["version"] with open("README.rst") as f: kwargs["long_description"] = f.read() if ( platform.python_implementation() == "CPython" and os.environ.get("TORNADO_EXTENSION") != "0" ): # This extension builds and works on pypy as well, although pypy's jit # produces equivalent performance. kwargs["ext_modules"] = [ Extension("tornado.speedups", sources=["tornado/speedups.c"]) ] if os.environ.get("TORNADO_EXTENSION") != "1": # Unless the user has specified that the extension is mandatory, # fall back to the pure-python implementation on any build failure. kwargs["cmdclass"] = {"build_ext": custom_build_ext} if setuptools is not None: python_requires = ">= 3.5" kwargs["python_requires"] = python_requires setup( name="tornado", version=version, packages=["tornado", "tornado.test", "tornado.platform"], package_data={ # data files need to be listed both here (which determines what gets # installed) and in MANIFEST.in (which determines what gets included # in the sdist tarball) "tornado": ["py.typed"], "tornado.test": [ "README", "csv_translations/fr_FR.csv", "gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo", "gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po", "options_test.cfg", "options_test_types.cfg", "options_test_types_str.cfg", "static/robots.txt", "static/sample.xml", "static/sample.xml.gz", "static/sample.xml.bz2", "static/dir/index.html", "static_foo.txt", "templates/utf8.html", "test.crt", "test.key", ], }, author="Facebook", author_email="python-tornado@googlegroups.com", url="http://www.tornadoweb.org/", license="http://www.apache.org/licenses/LICENSE-2.0", description=( "Tornado is a Python web framework and asynchronous networking library," " originally developed at FriendFeed." 
), classifiers=[ "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ], **kwargs ) tornado-6.1.0/tornado/000077500000000000000000000000001374705040500146375ustar00rootroot00000000000000tornado-6.1.0/tornado/__init__.py000066400000000000000000000017721374705040500167570ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Tornado web server and tools.""" # version is a human-readable version number. # version_info is a four-tuple for programmatic comparison. The first # three numbers are the components of the version number. The fourth # is zero for an official release, positive for a development branch, # or negative for a release candidate or beta (after the base version # number has been incremented) version = "6.1" version_info = (6, 1, 0, 0) tornado-6.1.0/tornado/_locale_data.py000066400000000000000000000110231374705040500175750ustar00rootroot00000000000000# Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Data used by the tornado.locale module.""" LOCALE_NAMES = { "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"}, "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"}, "ar_AR": {"name_en": u"Arabic", "name": u"العربية"}, "bg_BG": {"name_en": u"Bulgarian", "name": u"БългарÑки"}, "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"}, "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"}, "ca_ES": {"name_en": u"Catalan", "name": u"Català"}, "cs_CZ": {"name_en": u"Czech", "name": u"ÄŒeÅ¡tina"}, "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"}, "da_DK": {"name_en": u"Danish", "name": u"Dansk"}, "de_DE": {"name_en": u"German", "name": u"Deutsch"}, "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"}, "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"}, "en_US": {"name_en": u"English (US)", "name": u"English (US)"}, "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"}, "es_LA": {"name_en": u"Spanish", "name": u"Español"}, "et_EE": {"name_en": u"Estonian", "name": u"Eesti"}, "eu_ES": {"name_en": u"Basque", "name": u"Euskara"}, "fa_IR": {"name_en": u"Persian", "name": u"ÙØ§Ø±Ø³ÛŒ"}, "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"}, "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"}, "fr_FR": {"name_en": u"French", "name": u"Français"}, "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"}, "gl_ES": {"name_en": u"Galician", "name": u"Galego"}, "he_IL": {"name_en": u"Hebrew", "name": u"עברית"}, "hi_IN": {"name_en": u"Hindi", "name": u"हिनà¥à¤¦à¥€"}, "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"}, "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"}, "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"}, "is_IS": {"name_en": u"Icelandic", "name": u"Ãslenska"}, "it_IT": {"name_en": u"Italian", "name": u"Italiano"}, "ja_JP": {"name_en": u"Japanese", "name": u"日本語"}, "ko_KR": {"name_en": u"Korean", "name": u"한국어"}, "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"}, "lv_LV": {"name_en": u"Latvian", "name": u"LatvieÅ¡u"}, "mk_MK": {"name_en": u"Macedonian", "name": u"МакедонÑки"}, "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"}, "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"}, "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmÃ¥l)"}, "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"}, "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"}, "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"}, "pl_PL": {"name_en": u"Polish", "name": u"Polski"}, "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"}, "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"}, "ro_RO": {"name_en": u"Romanian", "name": u"Română"}, "ru_RU": {"name_en": u"Russian", "name": u"РуÑÑкий"}, "sk_SK": {"name_en": u"Slovak", "name": u"SlovenÄina"}, "sl_SI": {"name_en": u"Slovenian", "name": u"SlovenÅ¡Äina"}, "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"}, "sr_RS": {"name_en": u"Serbian", "name": u"СрпÑки"}, "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"}, "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"}, "ta_IN": {"name_en": u"Tamil", "name": u"தமிழà¯"}, "te_IN": {"name_en": u"Telugu", "name": u"తెలà±à°—à±"}, "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"}, "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"}, "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"}, "uk_UA": {"name_en": u"Ukraini ", "name": u"УкраїнÑька"}, "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"}, "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"}, "zh_TW": 
{"name_en": u"Chinese (Traditional)", "name": u"中文(ç¹é«”)"}, } tornado-6.1.0/tornado/auth.py000066400000000000000000001317431374705040500161630ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This module contains implementations of various third-party authentication schemes. All the classes in this file are class mixins designed to be used with the `tornado.web.RequestHandler` class. They are used in two ways: * On a login handler, use methods such as ``authenticate_redirect()``, ``authorize_redirect()``, and ``get_authenticated_user()`` to establish the user's identity and store authentication tokens to your database and/or cookies. * In non-login handlers, use methods such as ``facebook_request()`` or ``twitter_request()`` to use the authentication tokens to make requests to the respective services. They all take slightly different arguments due to the fact all these services implement authentication and authorization slightly differently. See the individual service classes below for complete documentation. Example usage for Google OAuth: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): async def get(self): if self.get_argument('code', False): user = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) # Save the user with e.g. set_secure_cookie else: self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: """ import base64 import binascii import hashlib import hmac import time import urllib.parse import uuid from tornado import httpclient from tornado import escape from tornado.httputil import url_concat from tornado.util import unicode_type from tornado.web import RequestHandler from typing import List, Any, Dict, cast, Iterable, Union, Optional class AuthError(Exception): pass class OpenIdMixin(object): """Abstract implementation of OpenID and Attribute Exchange. Class attributes: * ``_OPENID_ENDPOINT``: the identity provider's URI. """ def authenticate_redirect( self, callback_uri: Optional[str] = None, ax_attrs: List[str] = ["name", "email", "language", "username"], ) -> None: """Redirects to the authentication URL for this service. After authentication, the service will redirect back to the given callback URI with additional parameters including ``openid.mode``. We request the given attributes for the authenticated user by default (name, email, language, and username). If you don't need all those attributes for your app, you can request fewer with the ax_attrs keyword argument. .. versionchanged:: 6.0 The ``callback`` argument was removed and this method no longer returns an awaitable object. It is now an ordinary synchronous function. 
""" handler = cast(RequestHandler, self) callback_uri = callback_uri or handler.request.uri assert callback_uri is not None args = self._openid_args(callback_uri, ax_attrs=ax_attrs) endpoint = self._OPENID_ENDPOINT # type: ignore handler.redirect(endpoint + "?" + urllib.parse.urlencode(args)) async def get_authenticated_user( self, http_client: Optional[httpclient.AsyncHTTPClient] = None ) -> Dict[str, Any]: """Fetches the authenticated user data upon redirect. This method should be called by the handler that receives the redirect from the `authenticate_redirect()` method (which is often the same as the one that calls it; in that case you would call `get_authenticated_user` if the ``openid.mode`` parameter is present and `authenticate_redirect` if it is not). The result of this method will generally be used to set a cookie. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ handler = cast(RequestHandler, self) # Verify the OpenID response via direct request to the OP args = dict( (k, v[-1]) for k, v in handler.request.arguments.items() ) # type: Dict[str, Union[str, bytes]] args["openid.mode"] = u"check_authentication" url = self._OPENID_ENDPOINT # type: ignore if http_client is None: http_client = self.get_auth_http_client() resp = await http_client.fetch( url, method="POST", body=urllib.parse.urlencode(args) ) return self._on_authentication_verified(resp) def _openid_args( self, callback_uri: str, ax_attrs: Iterable[str] = [], oauth_scope: Optional[str] = None, ) -> Dict[str, str]: handler = cast(RequestHandler, self) url = urllib.parse.urljoin(handler.request.full_url(), callback_uri) args = { "openid.ns": "http://specs.openid.net/auth/2.0", "openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select", "openid.identity": "http://specs.openid.net/auth/2.0/identifier_select", "openid.return_to": url, "openid.realm": urllib.parse.urljoin(url, "/"), "openid.mode": "checkid_setup", } if ax_attrs: args.update( { "openid.ns.ax": "http://openid.net/srv/ax/1.0", "openid.ax.mode": "fetch_request", } ) ax_attrs = set(ax_attrs) required = [] # type: List[str] if "name" in ax_attrs: ax_attrs -= set(["name", "firstname", "fullname", "lastname"]) required += ["firstname", "fullname", "lastname"] args.update( { "openid.ax.type.firstname": "http://axschema.org/namePerson/first", "openid.ax.type.fullname": "http://axschema.org/namePerson", "openid.ax.type.lastname": "http://axschema.org/namePerson/last", } ) known_attrs = { "email": "http://axschema.org/contact/email", "language": "http://axschema.org/pref/language", "username": "http://axschema.org/namePerson/friendly", } for name in ax_attrs: args["openid.ax.type." 
+ name] = known_attrs[name] required.append(name) args["openid.ax.required"] = ",".join(required) if oauth_scope: args.update( { "openid.ns.oauth": "http://specs.openid.net/extensions/oauth/1.0", "openid.oauth.consumer": handler.request.host.split(":")[0], "openid.oauth.scope": oauth_scope, } ) return args def _on_authentication_verified( self, response: httpclient.HTTPResponse ) -> Dict[str, Any]: handler = cast(RequestHandler, self) if b"is_valid:true" not in response.body: raise AuthError("Invalid OpenID response: %r" % response.body) # Make sure we got back at least an email from attribute exchange ax_ns = None for key in handler.request.arguments: if ( key.startswith("openid.ns.") and handler.get_argument(key) == u"http://openid.net/srv/ax/1.0" ): ax_ns = key[10:] break def get_ax_arg(uri: str) -> str: if not ax_ns: return u"" prefix = "openid." + ax_ns + ".type." ax_name = None for name in handler.request.arguments.keys(): if handler.get_argument(name) == uri and name.startswith(prefix): part = name[len(prefix) :] ax_name = "openid." + ax_ns + ".value." + part break if not ax_name: return u"" return handler.get_argument(ax_name, u"") email = get_ax_arg("http://axschema.org/contact/email") name = get_ax_arg("http://axschema.org/namePerson") first_name = get_ax_arg("http://axschema.org/namePerson/first") last_name = get_ax_arg("http://axschema.org/namePerson/last") username = get_ax_arg("http://axschema.org/namePerson/friendly") locale = get_ax_arg("http://axschema.org/pref/language").lower() user = dict() name_parts = [] if first_name: user["first_name"] = first_name name_parts.append(first_name) if last_name: user["last_name"] = last_name name_parts.append(last_name) if name: user["name"] = name elif name_parts: user["name"] = u" ".join(name_parts) elif email: user["name"] = email.split("@")[0] if email: user["email"] = email if locale: user["locale"] = locale if username: user["username"] = username claimed_id = handler.get_argument("openid.claimed_id", None) if claimed_id: user["claimed_id"] = claimed_id return user def get_auth_http_client(self) -> httpclient.AsyncHTTPClient: """Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than the default. """ return httpclient.AsyncHTTPClient() class OAuthMixin(object): """Abstract implementation of OAuth 1.0 and 1.0a. See `TwitterMixin` below for an example implementation. Class attributes: * ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url. * ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url. * ``_OAUTH_VERSION``: May be either "1.0" or "1.0a". * ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires advance registration of callbacks. Subclasses must also override the `_oauth_get_user_future` and `_oauth_consumer_token` methods. """ async def authorize_redirect( self, callback_uri: Optional[str] = None, extra_params: Optional[Dict[str, Any]] = None, http_client: Optional[httpclient.AsyncHTTPClient] = None, ) -> None: """Redirects the user to obtain OAuth authorization for this service. The ``callback_uri`` may be omitted if you have previously registered a callback URI with the third-party service. For some services, you must use a previously-registered callback URI and cannot specify a callback via this method. This method sets a cookie called ``_oauth_request_token`` which is subsequently used (and cleared) in `get_authenticated_user` for security purposes. 
This method is asynchronous and must be called with ``await`` or ``yield`` (This is different from other ``auth*_redirect`` methods defined in this module). It calls `.RequestHandler.finish` for you so you should not write any other response after it returns. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): raise Exception("This service does not support oauth_callback") if http_client is None: http_client = self.get_auth_http_client() assert http_client is not None if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": response = await http_client.fetch( self._oauth_request_token_url( callback_uri=callback_uri, extra_params=extra_params ) ) else: response = await http_client.fetch(self._oauth_request_token_url()) url = self._OAUTH_AUTHORIZE_URL # type: ignore self._on_request_token(url, callback_uri, response) async def get_authenticated_user( self, http_client: Optional[httpclient.AsyncHTTPClient] = None ) -> Dict[str, Any]: """Gets the OAuth authorized user and access token. This method should be called from the handler for your OAuth callback URL to complete the registration process. We run the callback with the authenticated user dictionary. This dictionary will contain an ``access_key`` which can be used to make authorized requests to this service on behalf of the user. The dictionary will also contain other fields such as ``name``, depending on the service used. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ handler = cast(RequestHandler, self) request_key = escape.utf8(handler.get_argument("oauth_token")) oauth_verifier = handler.get_argument("oauth_verifier", None) request_cookie = handler.get_cookie("_oauth_request_token") if not request_cookie: raise AuthError("Missing OAuth request token cookie") handler.clear_cookie("_oauth_request_token") cookie_key, cookie_secret = [ base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|") ] if cookie_key != request_key: raise AuthError("Request token does not match cookie") token = dict( key=cookie_key, secret=cookie_secret ) # type: Dict[str, Union[str, bytes]] if oauth_verifier: token["verifier"] = oauth_verifier if http_client is None: http_client = self.get_auth_http_client() assert http_client is not None response = await http_client.fetch(self._oauth_access_token_url(token)) access_token = _oauth_parse_response(response.body) user = await self._oauth_get_user_future(access_token) if not user: raise AuthError("Error getting user") user["access_token"] = access_token return user def _oauth_request_token_url( self, callback_uri: Optional[str] = None, extra_params: Optional[Dict[str, Any]] = None, ) -> str: handler = cast(RequestHandler, self) consumer_token = self._oauth_consumer_token() url = self._OAUTH_REQUEST_TOKEN_URL # type: ignore args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), oauth_version="1.0", ) if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": if callback_uri == "oob": args["oauth_callback"] = "oob" elif callback_uri: args["oauth_callback"] = urllib.parse.urljoin( handler.request.full_url(), callback_uri ) if extra_params: 
args.update(extra_params) signature = _oauth10a_signature(consumer_token, "GET", url, args) else: signature = _oauth_signature(consumer_token, "GET", url, args) args["oauth_signature"] = signature return url + "?" + urllib.parse.urlencode(args) def _on_request_token( self, authorize_url: str, callback_uri: Optional[str], response: httpclient.HTTPResponse, ) -> None: handler = cast(RequestHandler, self) request_token = _oauth_parse_response(response.body) data = ( base64.b64encode(escape.utf8(request_token["key"])) + b"|" + base64.b64encode(escape.utf8(request_token["secret"])) ) handler.set_cookie("_oauth_request_token", data) args = dict(oauth_token=request_token["key"]) if callback_uri == "oob": handler.finish(authorize_url + "?" + urllib.parse.urlencode(args)) return elif callback_uri: args["oauth_callback"] = urllib.parse.urljoin( handler.request.full_url(), callback_uri ) handler.redirect(authorize_url + "?" + urllib.parse.urlencode(args)) def _oauth_access_token_url(self, request_token: Dict[str, Any]) -> str: consumer_token = self._oauth_consumer_token() url = self._OAUTH_ACCESS_TOKEN_URL # type: ignore args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_token=escape.to_basestring(request_token["key"]), oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), oauth_version="1.0", ) if "verifier" in request_token: args["oauth_verifier"] = request_token["verifier"] if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": signature = _oauth10a_signature( consumer_token, "GET", url, args, request_token ) else: signature = _oauth_signature( consumer_token, "GET", url, args, request_token ) args["oauth_signature"] = signature return url + "?" + urllib.parse.urlencode(args) def _oauth_consumer_token(self) -> Dict[str, Any]: """Subclasses must override this to return their OAuth consumer keys. The return value should be a `dict` with keys ``key`` and ``secret``. """ raise NotImplementedError() async def _oauth_get_user_future( self, access_token: Dict[str, Any] ) -> Dict[str, Any]: """Subclasses must override this to get basic information about the user. Should be a coroutine whose result is a dictionary containing information about the user, which may have been retrieved by using ``access_token`` to make a request to the service. The access token will be added to the returned dictionary to make the result of `get_authenticated_user`. .. versionchanged:: 5.1 Subclasses may also define this method with ``async def``. .. versionchanged:: 6.0 A synchronous fallback to ``_oauth_get_user`` was removed. """ raise NotImplementedError() def _oauth_request_parameters( self, url: str, access_token: Dict[str, Any], parameters: Dict[str, Any] = {}, method: str = "GET", ) -> Dict[str, Any]: """Returns the OAuth parameters as a dict for the given request. parameters should include all POST arguments and query string arguments that will be sent with the request. 
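
        Example (a sketch of how `twitter_request` uses this helper;
        ``access_token`` is assumed to be the dict returned by
        `get_authenticated_user`)::

            args = {"status": "Hello from Tornado"}
            oauth = self._oauth_request_parameters(
                "https://api.twitter.com/1.1/statuses/update.json",
                access_token, args, method="POST")
            args.update(oauth)
            # args can now be url-encoded into the request body or query string.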
""" consumer_token = self._oauth_consumer_token() base_args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_token=escape.to_basestring(access_token["key"]), oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), oauth_version="1.0", ) args = {} args.update(base_args) args.update(parameters) if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": signature = _oauth10a_signature( consumer_token, method, url, args, access_token ) else: signature = _oauth_signature( consumer_token, method, url, args, access_token ) base_args["oauth_signature"] = escape.to_basestring(signature) return base_args def get_auth_http_client(self) -> httpclient.AsyncHTTPClient: """Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than the default. """ return httpclient.AsyncHTTPClient() class OAuth2Mixin(object): """Abstract implementation of OAuth 2.0. See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example implementations. Class attributes: * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url. * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url. """ def authorize_redirect( self, redirect_uri: Optional[str] = None, client_id: Optional[str] = None, client_secret: Optional[str] = None, extra_params: Optional[Dict[str, Any]] = None, scope: Optional[List[str]] = None, response_type: str = "code", ) -> None: """Redirects the user to obtain OAuth authorization for this service. Some providers require that you register a redirect URL with your application instead of passing one via this method. You should call this method to log the user in, and then call ``get_authenticated_user`` in the handler for your redirect URL to complete the authorization process. .. versionchanged:: 6.0 The ``callback`` argument and returned awaitable were removed; this is now an ordinary synchronous function. """ handler = cast(RequestHandler, self) args = {"response_type": response_type} if redirect_uri is not None: args["redirect_uri"] = redirect_uri if client_id is not None: args["client_id"] = client_id if extra_params: args.update(extra_params) if scope: args["scope"] = " ".join(scope) url = self._OAUTH_AUTHORIZE_URL # type: ignore handler.redirect(url_concat(url, args)) def _oauth_request_token_url( self, redirect_uri: Optional[str] = None, client_id: Optional[str] = None, client_secret: Optional[str] = None, code: Optional[str] = None, extra_params: Optional[Dict[str, Any]] = None, ) -> str: url = self._OAUTH_ACCESS_TOKEN_URL # type: ignore args = {} # type: Dict[str, str] if redirect_uri is not None: args["redirect_uri"] = redirect_uri if code is not None: args["code"] = code if client_id is not None: args["client_id"] = client_id if client_secret is not None: args["client_secret"] = client_secret if extra_params: args.update(extra_params) return url_concat(url, args) async def oauth2_request( self, url: str, access_token: Optional[str] = None, post_args: Optional[Dict[str, Any]] = None, **args: Any ) -> Any: """Fetches the given URL auth an OAuth2 access token. If the request is a POST, ``post_args`` should be provided. Query string arguments should be given as keyword arguments. 
Example usage: ..testcode:: class MainHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): @tornado.web.authenticated async def get(self): new_entry = await self.oauth2_request( "https://graph.facebook.com/me/feed", post_args={"message": "I am posting from my Tornado application!"}, access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? self.authorize_redirect() return self.finish("Posted a message!") .. testoutput:: :hide: .. versionadded:: 4.3 .. versionchanged::: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ all_args = {} if access_token: all_args["access_token"] = access_token all_args.update(args) if all_args: url += "?" + urllib.parse.urlencode(all_args) http = self.get_auth_http_client() if post_args is not None: response = await http.fetch( url, method="POST", body=urllib.parse.urlencode(post_args) ) else: response = await http.fetch(url) return escape.json_decode(response.body) def get_auth_http_client(self) -> httpclient.AsyncHTTPClient: """Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than the default. .. versionadded:: 4.3 """ return httpclient.AsyncHTTPClient() class TwitterMixin(OAuthMixin): """Twitter OAuth authentication. To authenticate with Twitter, register your application with Twitter at http://twitter.com/apps. Then copy your Consumer Key and Consumer Secret to the application `~tornado.web.Application.settings` ``twitter_consumer_key`` and ``twitter_consumer_secret``. Use this mixin on the handler for the URL you registered as your application's callback URL. When your application is set up, you can use this mixin like this to authenticate the user with Twitter and get access to their stream: .. testcode:: class TwitterLoginHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): async def get(self): if self.get_argument("oauth_token", None): user = await self.get_authenticated_user() # Save the user using e.g. set_secure_cookie() else: await self.authorize_redirect() .. testoutput:: :hide: The user object returned by `~OAuthMixin.get_authenticated_user` includes the attributes ``username``, ``name``, ``access_token``, and all of the custom Twitter user attributes described at https://dev.twitter.com/docs/api/1.1/get/users/show """ _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token" _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token" _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize" _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate" _OAUTH_NO_CALLBACKS = False _TWITTER_BASE_URL = "https://api.twitter.com/1.1" async def authenticate_redirect(self, callback_uri: Optional[str] = None) -> None: """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. 
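
        Example usage (a sketch; assumes the ``twitter_consumer_key`` and
        ``twitter_consumer_secret`` application settings are configured):

        .. testcode::

            class TwitterSSOHandler(tornado.web.RequestHandler,
                                    tornado.auth.TwitterMixin):
                async def get(self):
                    if self.get_argument("oauth_token", None):
                        user = await self.get_authenticated_user()
                        # Save the user with e.g. set_secure_cookie()
                    else:
                        await self.authenticate_redirect()

        .. testoutput::
            :hide: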
""" http = self.get_auth_http_client() response = await http.fetch( self._oauth_request_token_url(callback_uri=callback_uri) ) self._on_request_token(self._OAUTH_AUTHENTICATE_URL, None, response) async def twitter_request( self, path: str, access_token: Dict[str, Any], post_args: Optional[Dict[str, Any]] = None, **args: Any ) -> Any: """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor`` The path should not include the format or API version number. (we automatically use JSON format and API version 1). If the request is a POST, ``post_args`` should be provided. Query string arguments should be given as keyword arguments. All the Twitter methods are documented at http://dev.twitter.com/ Many methods require an OAuth access token which you can obtain through `~OAuthMixin.authorize_redirect` and `~OAuthMixin.get_authenticated_user`. The user returned through that process includes an 'access_token' attribute that can be used to make authenticated requests via this method. Example usage: .. testcode:: class MainHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): @tornado.web.authenticated async def get(self): new_entry = await self.twitter_request( "/statuses/update", post_args={"status": "Testing Tornado Web Server"}, access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? await self.authorize_redirect() return self.finish("Posted a message!") .. testoutput:: :hide: .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ if path.startswith("http:") or path.startswith("https:"): # Raw urls are useful for e.g. search which doesn't follow the # usual pattern: http://search.twitter.com/search.json url = path else: url = self._TWITTER_BASE_URL + path + ".json" # Add the OAuth resource request signature if we have credentials if access_token: all_args = {} all_args.update(args) all_args.update(post_args or {}) method = "POST" if post_args is not None else "GET" oauth = self._oauth_request_parameters( url, access_token, all_args, method=method ) args.update(oauth) if args: url += "?" + urllib.parse.urlencode(args) http = self.get_auth_http_client() if post_args is not None: response = await http.fetch( url, method="POST", body=urllib.parse.urlencode(post_args) ) else: response = await http.fetch(url) return escape.json_decode(response.body) def _oauth_consumer_token(self) -> Dict[str, Any]: handler = cast(RequestHandler, self) handler.require_setting("twitter_consumer_key", "Twitter OAuth") handler.require_setting("twitter_consumer_secret", "Twitter OAuth") return dict( key=handler.settings["twitter_consumer_key"], secret=handler.settings["twitter_consumer_secret"], ) async def _oauth_get_user_future( self, access_token: Dict[str, Any] ) -> Dict[str, Any]: user = await self.twitter_request( "/account/verify_credentials", access_token=access_token ) if user: user["username"] = user["screen_name"] return user class GoogleOAuth2Mixin(OAuth2Mixin): """Google authentication using OAuth2. In order to use, register your application with Google and copy the relevant parameters to your application settings. * Go to the Google Dev Console at http://console.developers.google.com * Select a project, or create a new one. * In the sidebar on the left, select APIs & Auth. * In the list of APIs, find the Google+ API service and set it to ON. * In the sidebar on the left, select Credentials. * In the OAuth section of the page, select Create New Client ID. 
* Set the Redirect URI to point to your auth handler * Copy the "Client secret" and "Client ID" to the application settings as ``{"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}`` .. versionadded:: 3.2 """ _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth" _OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token" _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo" _OAUTH_NO_CALLBACKS = False _OAUTH_SETTINGS_KEY = "google_oauth" async def get_authenticated_user( self, redirect_uri: str, code: str ) -> Dict[str, Any]: """Handles the login for the Google user, returning an access token. The result is a dictionary containing an ``access_token`` field ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). Unlike other ``get_authenticated_user`` methods in this package, this method does not return any additional information about the user. The returned access token can be used with `OAuth2Mixin.oauth2_request` to request additional information (perhaps from ``https://www.googleapis.com/oauth2/v2/userinfo``) Example usage: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): async def get(self): if self.get_argument('code', False): access = await self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) user = await self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with # e.g. set_secure_cookie. else: self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ # noqa: E501 handler = cast(RequestHandler, self) http = self.get_auth_http_client() body = urllib.parse.urlencode( { "redirect_uri": redirect_uri, "code": code, "client_id": handler.settings[self._OAUTH_SETTINGS_KEY]["key"], "client_secret": handler.settings[self._OAUTH_SETTINGS_KEY]["secret"], "grant_type": "authorization_code", } ) response = await http.fetch( self._OAUTH_ACCESS_TOKEN_URL, method="POST", headers={"Content-Type": "application/x-www-form-urlencoded"}, body=body, ) return escape.json_decode(response.body) class FacebookGraphMixin(OAuth2Mixin): """Facebook authentication using the new Graph API and OAuth2.""" _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?" _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?" _OAUTH_NO_CALLBACKS = False _FACEBOOK_BASE_URL = "https://graph.facebook.com" async def get_authenticated_user( self, redirect_uri: str, client_id: str, client_secret: str, code: str, extra_fields: Optional[Dict[str, Any]] = None, ) -> Optional[Dict[str, Any]]: """Handles the login for the Facebook user, returning a user object. Example usage: .. testcode:: class FacebookGraphLoginHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): async def get(self): if self.get_argument("code", False): user = await self.get_authenticated_user( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], client_secret=self.settings["facebook_secret"], code=self.get_argument("code")) # Save the user with e.g. 
set_secure_cookie else: self.authorize_redirect( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], extra_params={"scope": "read_stream,offline_access"}) .. testoutput:: :hide: This method returns a dictionary which may contain the following fields: * ``access_token``, a string which may be passed to `facebook_request` * ``session_expires``, an integer encoded as a string representing the time until the access token expires in seconds. This field should be used like ``int(user['session_expires'])``; in a future version of Tornado it will change from a string to an integer. * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``, ``link``, plus any fields named in the ``extra_fields`` argument. These fields are copied from the Facebook graph API `user object `_ .. versionchanged:: 4.5 The ``session_expires`` field was updated to support changes made to the Facebook API in March 2017. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ http = self.get_auth_http_client() args = { "redirect_uri": redirect_uri, "code": code, "client_id": client_id, "client_secret": client_secret, } fields = set( ["id", "name", "first_name", "last_name", "locale", "picture", "link"] ) if extra_fields: fields.update(extra_fields) response = await http.fetch( self._oauth_request_token_url(**args) # type: ignore ) args = escape.json_decode(response.body) session = { "access_token": args.get("access_token"), "expires_in": args.get("expires_in"), } assert session["access_token"] is not None user = await self.facebook_request( path="/me", access_token=session["access_token"], appsecret_proof=hmac.new( key=client_secret.encode("utf8"), msg=session["access_token"].encode("utf8"), digestmod=hashlib.sha256, ).hexdigest(), fields=",".join(fields), ) if user is None: return None fieldmap = {} for field in fields: fieldmap[field] = user.get(field) # session_expires is converted to str for compatibility with # older versions in which the server used url-encoding and # this code simply returned the string verbatim. # This should change in Tornado 5.0. fieldmap.update( { "access_token": session["access_token"], "session_expires": str(session.get("expires_in")), } ) return fieldmap async def facebook_request( self, path: str, access_token: Optional[str] = None, post_args: Optional[Dict[str, Any]] = None, **args: Any ) -> Any: """Fetches the given relative API path, e.g., "/btaylor/picture" If the request is a POST, ``post_args`` should be provided. Query string arguments should be given as keyword arguments. An introduction to the Facebook Graph API can be found at http://developers.facebook.com/docs/api Many methods require an OAuth access token which you can obtain through `~OAuth2Mixin.authorize_redirect` and `get_authenticated_user`. The user returned through that process includes an ``access_token`` attribute that can be used to make authenticated requests via this method. Example usage: .. testcode:: class MainHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): @tornado.web.authenticated async def get(self): new_entry = await self.facebook_request( "/me/feed", post_args={"message": "I am posting from my Tornado application!"}, access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? self.authorize_redirect() return self.finish("Posted a message!") .. 
testoutput:: :hide: The given path is relative to ``self._FACEBOOK_BASE_URL``, by default "https://graph.facebook.com". This method is a wrapper around `OAuth2Mixin.oauth2_request`; the only difference is that this method takes a relative path, while ``oauth2_request`` takes a complete url. .. versionchanged:: 3.1 Added the ability to override ``self._FACEBOOK_BASE_URL``. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ url = self._FACEBOOK_BASE_URL + path return await self.oauth2_request( url, access_token=access_token, post_args=post_args, **args ) def _oauth_signature( consumer_token: Dict[str, Any], method: str, url: str, parameters: Dict[str, Any] = {}, token: Optional[Dict[str, Any]] = None, ) -> bytes: """Calculates the HMAC-SHA1 OAuth signature for the given request. See http://oauth.net/core/1.0/#signing_process """ parts = urllib.parse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) base_elems.append( "&".join( "%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()) ) ) base_string = "&".join(_oauth_escape(e) for e in base_elems) key_elems = [escape.utf8(consumer_token["secret"])] key_elems.append(escape.utf8(token["secret"] if token else "")) key = b"&".join(key_elems) hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) return binascii.b2a_base64(hash.digest())[:-1] def _oauth10a_signature( consumer_token: Dict[str, Any], method: str, url: str, parameters: Dict[str, Any] = {}, token: Optional[Dict[str, Any]] = None, ) -> bytes: """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request. See http://oauth.net/core/1.0a/#signing_process """ parts = urllib.parse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) base_elems.append( "&".join( "%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()) ) ) base_string = "&".join(_oauth_escape(e) for e in base_elems) key_elems = [escape.utf8(urllib.parse.quote(consumer_token["secret"], safe="~"))] key_elems.append( escape.utf8(urllib.parse.quote(token["secret"], safe="~") if token else "") ) key = b"&".join(key_elems) hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) return binascii.b2a_base64(hash.digest())[:-1] def _oauth_escape(val: Union[str, bytes]) -> str: if isinstance(val, unicode_type): val = val.encode("utf-8") return urllib.parse.quote(val, safe="~") def _oauth_parse_response(body: bytes) -> Dict[str, Any]: # I can't find an officially-defined encoding for oauth responses and # have never seen anyone use non-ascii. Leave the response in a byte # string for python 2, and use utf8 on python 3. body_str = escape.native_str(body) p = urllib.parse.parse_qs(body_str, keep_blank_values=False) token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) # Add the extra parameters the Provider included to the token special = ("oauth_token", "oauth_token_secret") token.update((k, p[k][0]) for k in p if k not in special) return token tornado-6.1.0/tornado/autoreload.py000066400000000000000000000325241374705040500173560ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Automatically restart the server when a source file is modified. Most applications should not access this module directly. Instead, pass the keyword argument ``autoreload=True`` to the `tornado.web.Application` constructor (or ``debug=True``, which enables this setting and several others). This will enable autoreload mode as well as checking for changes to templates and static resources. Note that restarting is a destructive operation and any requests in progress will be aborted when the process restarts. (If you want to disable autoreload while using other debug-mode features, pass both ``debug=True`` and ``autoreload=False``). This module can also be used as a command-line wrapper around scripts such as unit test runners. See the `main` method for details. The command-line wrapper and Application debug modes can be used together. This combination is encouraged as the wrapper catches syntax errors and other import-time failures, while debug mode catches changes once the server has started. This module will not work correctly when `.HTTPServer`'s multi-process mode is used. Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) because it re-executes Python using ``sys.executable`` and ``sys.argv``. Additionally, modifying these variables will cause reloading to behave incorrectly. """ import os import sys # sys.path handling # ----------------- # # If a module is run with "python -m", the current directory (i.e. "") # is automatically prepended to sys.path, but not if it is run as # "path/to/file.py". The processing for "-m" rewrites the former to # the latter, so subsequent executions won't have the same path as the # original. # # Conversely, when run as path/to/file.py, the directory containing # file.py gets added to the path, which can cause confusion as imports # may become relative in spite of the future import. # # We address the former problem by reconstructing the original command # line (Python >= 3.4) or by setting the $PYTHONPATH environment # variable (Python < 3.4) before re-execution so the new process will # see the correct path. We attempt to address the latter problem when # tornado.autoreload is run as __main__. if __name__ == "__main__": # This sys.path manipulation must come before our imports (as much # as possible - if we introduced a tornado.sys or tornado.os # module we'd be in trouble), or else our imports would become # relative again despite the future import. # # There is a separate __main__ block at the end of the file to call main(). 
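    # For example, when this file is run as "python tornado/autoreload.py",
    # sys.path[0] is the tornado/ package directory itself, so "ioloop"
    # becomes importable both as "ioloop" and as "tornado.ioloop", producing
    # two distinct copies of the module. The check below drops that entry.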
if sys.path[0] == os.path.dirname(__file__): del sys.path[0] import functools import logging import os import pkgutil # type: ignore import sys import traceback import types import subprocess import weakref from tornado import ioloop from tornado.log import gen_log from tornado import process from tornado.util import exec_in try: import signal except ImportError: signal = None # type: ignore import typing from typing import Callable, Dict if typing.TYPE_CHECKING: from typing import List, Optional, Union # noqa: F401 # os.execv is broken on Windows and can't properly parse command line # arguments and executable name if they contain whitespaces. subprocess # fixes that behavior. _has_execv = sys.platform != "win32" _watched_files = set() _reload_hooks = [] _reload_attempted = False _io_loops = weakref.WeakKeyDictionary() # type: ignore _autoreload_is_main = False _original_argv = None # type: Optional[List[str]] _original_spec = None def start(check_time: int = 500) -> None: """Begins watching source files for changes. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ io_loop = ioloop.IOLoop.current() if io_loop in _io_loops: return _io_loops[io_loop] = True if len(_io_loops) > 1: gen_log.warning("tornado.autoreload started more than once in the same process") modify_times = {} # type: Dict[str, float] callback = functools.partial(_reload_on_update, modify_times) scheduler = ioloop.PeriodicCallback(callback, check_time) scheduler.start() def wait() -> None: """Wait for a watched file to change, then restart the process. Intended to be used at the end of scripts like unit test runners, to run the tests again after any source file changes (but see also the command-line interface in `main`) """ io_loop = ioloop.IOLoop() io_loop.add_callback(start) io_loop.start() def watch(filename: str) -> None: """Add a file to the watch list. All imported modules are watched by default. """ _watched_files.add(filename) def add_reload_hook(fn: Callable[[], None]) -> None: """Add a function to be called before reloading the process. Note that for open file and socket handles it is generally preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or `os.set_inheritable`) instead of using a reload hook to close them. """ _reload_hooks.append(fn) def _reload_on_update(modify_times: Dict[str, float]) -> None: if _reload_attempted: # We already tried to reload and it didn't work, so don't try again. return if process.task_id() is not None: # We're in a child process created by fork_processes. If child # processes restarted themselves, they'd all restart and then # all call fork_processes again. return for module in list(sys.modules.values()): # Some modules play games with sys.modules (e.g. email/__init__.py # in the standard library), and occasionally this can cause strange # failures in getattr. Just ignore anything that's not an ordinary # module. 
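        # (Values in sys.modules are not guaranteed to be module objects;
        # None placeholders and lazy-import proxies both occur in practice,
        # so the isinstance check below is a guard, not an optimization.)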
if not isinstance(module, types.ModuleType): continue path = getattr(module, "__file__", None) if not path: continue if path.endswith(".pyc") or path.endswith(".pyo"): path = path[:-1] _check_file(modify_times, path) for path in _watched_files: _check_file(modify_times, path) def _check_file(modify_times: Dict[str, float], path: str) -> None: try: modified = os.stat(path).st_mtime except Exception: return if path not in modify_times: modify_times[path] = modified return if modify_times[path] != modified: gen_log.info("%s modified; restarting server", path) _reload() def _reload() -> None: global _reload_attempted _reload_attempted = True for fn in _reload_hooks: fn() if hasattr(signal, "setitimer"): # Clear the alarm signal set by # ioloop.set_blocking_log_threshold so it doesn't fire # after the exec. signal.setitimer(signal.ITIMER_REAL, 0, 0) # sys.path fixes: see comments at top of file. If __main__.__spec__ # exists, we were invoked with -m and the effective path is about to # change on re-exec. Reconstruct the original command line to # ensure that the new process sees the same path we did. If # __spec__ is not available (Python < 3.4), check instead if # sys.path[0] is an empty string and add the current directory to # $PYTHONPATH. if _autoreload_is_main: assert _original_argv is not None spec = _original_spec argv = _original_argv else: spec = getattr(sys.modules["__main__"], "__spec__", None) argv = sys.argv if spec: argv = ["-m", spec.name] + argv[1:] else: path_prefix = "." + os.pathsep if sys.path[0] == "" and not os.environ.get("PYTHONPATH", "").startswith( path_prefix ): os.environ["PYTHONPATH"] = path_prefix + os.environ.get("PYTHONPATH", "") if not _has_execv: subprocess.Popen([sys.executable] + argv) os._exit(0) else: try: os.execv(sys.executable, [sys.executable] + argv) except OSError: # Mac OS X versions prior to 10.6 do not support execv in # a process that contains multiple threads. Instead of # re-executing in the current process, start a new one # and cause the current process to exit. This isn't # ideal since the new process is detached from the parent # terminal and thus cannot easily be killed with ctrl-C, # but it's better than not being able to autoreload at # all. # Unfortunately the errno returned in this case does not # appear to be consistent, so we can't easily check for # this error specifically. os.spawnv( os.P_NOWAIT, sys.executable, [sys.executable] + argv # type: ignore ) # At this point the IOLoop has been closed and finally # blocks will experience errors if we allow the stack to # unwind, so just exit uncleanly. os._exit(0) _USAGE = """\ Usage: python -m tornado.autoreload -m module.to.run [args...] python -m tornado.autoreload path/to/script.py [args...] """ def main() -> None: """Command-line wrapper to re-run a script whenever its source changes. Scripts may be specified by filename or module name:: python -m tornado.autoreload -m tornado.test.runtests python -m tornado.autoreload tornado/test/runtests.py Running a script with this wrapper is similar to calling `tornado.autoreload.wait` at the end of the script, but this wrapper can catch import-time problems like syntax errors that would otherwise prevent the script from reaching its call to `wait`. """ # Remember that we were launched with autoreload as main. # The main module can be tricky; set the variables both in our globals # (which may be __main__) and the real importable version. 
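    # Under "python -m tornado.autoreload" this function runs in the module
    # named "__main__", while "tornado.autoreload" is a second, separately
    # imported copy of the same file. Assigning through the import below
    # keeps the flags consistent in both copies.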
import tornado.autoreload global _autoreload_is_main global _original_argv, _original_spec tornado.autoreload._autoreload_is_main = _autoreload_is_main = True original_argv = sys.argv tornado.autoreload._original_argv = _original_argv = original_argv original_spec = getattr(sys.modules["__main__"], "__spec__", None) tornado.autoreload._original_spec = _original_spec = original_spec sys.argv = sys.argv[:] if len(sys.argv) >= 3 and sys.argv[1] == "-m": mode = "module" module = sys.argv[2] del sys.argv[1:3] elif len(sys.argv) >= 2: mode = "script" script = sys.argv[1] sys.argv = sys.argv[1:] else: print(_USAGE, file=sys.stderr) sys.exit(1) try: if mode == "module": import runpy runpy.run_module(module, run_name="__main__", alter_sys=True) elif mode == "script": with open(script) as f: # Execute the script in our namespace instead of creating # a new one so that something that tries to import __main__ # (e.g. the unittest module) will see names defined in the # script instead of just those defined in this module. global __file__ __file__ = script # If __package__ is defined, imports may be incorrectly # interpreted as relative to this module. global __package__ del __package__ exec_in(f.read(), globals(), globals()) except SystemExit as e: logging.basicConfig() gen_log.info("Script exited with status %s", e.code) except Exception as e: logging.basicConfig() gen_log.warning("Script exited with uncaught exception", exc_info=True) # If an exception occurred at import time, the file with the error # never made it into sys.modules and so we won't know to watch it. # Just to make sure we've covered everything, walk the stack trace # from the exception and watch every file. for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]): watch(filename) if isinstance(e, SyntaxError): # SyntaxErrors are special: their innermost stack frame is fake # so extract_tb won't see it and we have to get the filename # from the exception object. watch(e.filename) else: logging.basicConfig() gen_log.info("Script exited normally") # restore sys.argv so subsequent executions will include autoreload sys.argv = original_argv if mode == "module": # runpy did a fake import of the module as __main__, but now it's # no longer in sys.modules. Figure out where it is and watch it. loader = pkgutil.get_loader(module) if loader is not None: watch(loader.get_filename()) # type: ignore wait() if __name__ == "__main__": # See also the other __main__ block at the top of the file, which modifies # sys.path before our imports main() tornado-6.1.0/tornado/concurrent.py000066400000000000000000000176541374705040500174100ustar00rootroot00000000000000# # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities for working with ``Future`` objects. Tornado previously provided its own ``Future`` class, but now uses `asyncio.Future`. This module contains utility functions for working with `asyncio.Future` in a way that is backwards-compatible with Tornado's old ``Future`` implementation. 
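
For example, `future_add_done_callback` (defined below) guarantees that a
callback attached to an already-finished future runs immediately, which plain
``Future.add_done_callback`` does not promise. A rough sketch::

    fut = Future()
    fut.set_result("done")
    # Runs right away, without waiting for an event-loop iteration.
    future_add_done_callback(fut, lambda f: print(f.result()))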
While this module is an important part of Tornado's internal implementation, applications rarely need to interact with it directly. """ import asyncio from concurrent import futures import functools import sys import types from tornado.log import app_log import typing from typing import Any, Callable, Optional, Tuple, Union _T = typing.TypeVar("_T") class ReturnValueIgnoredError(Exception): # No longer used; was previously used by @return_future pass Future = asyncio.Future FUTURES = (futures.Future, Future) def is_future(x: Any) -> bool: return isinstance(x, FUTURES) class DummyExecutor(futures.Executor): def submit( self, fn: Callable[..., _T], *args: Any, **kwargs: Any ) -> "futures.Future[_T]": future = futures.Future() # type: futures.Future[_T] try: future_set_result_unless_cancelled(future, fn(*args, **kwargs)) except Exception: future_set_exc_info(future, sys.exc_info()) return future def shutdown(self, wait: bool = True) -> None: pass dummy_executor = DummyExecutor() def run_on_executor(*args: Any, **kwargs: Any) -> Callable: """Decorator to run a synchronous method asynchronously on an executor. Returns a future. The executor to be used is determined by the ``executor`` attributes of ``self``. To use a different attribute name, pass a keyword argument to the decorator:: @run_on_executor(executor='_thread_pool') def foo(self): pass This decorator should not be confused with the similarly-named `.IOLoop.run_in_executor`. In general, using ``run_in_executor`` when *calling* a blocking method is recommended instead of using this decorator when *defining* a method. If compatibility with older versions of Tornado is required, consider defining an executor and using ``executor.submit()`` at the call site. .. versionchanged:: 4.2 Added keyword arguments to use alternative attributes. .. versionchanged:: 5.0 Always uses the current IOLoop instead of ``self.io_loop``. .. versionchanged:: 5.1 Returns a `.Future` compatible with ``await`` instead of a `concurrent.futures.Future`. .. deprecated:: 5.1 The ``callback`` argument is deprecated and will be removed in 6.0. The decorator itself is discouraged in new code but will not be removed in 6.0. .. versionchanged:: 6.0 The ``callback`` argument was removed. """ # Fully type-checking decorators is tricky, and this one is # discouraged anyway so it doesn't have all the generic magic. def run_on_executor_decorator(fn: Callable) -> Callable[..., Future]: executor = kwargs.get("executor", "executor") @functools.wraps(fn) def wrapper(self: Any, *args: Any, **kwargs: Any) -> Future: async_future = Future() # type: Future conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs) chain_future(conc_future, async_future) return async_future return wrapper if args and kwargs: raise ValueError("cannot combine positional and keyword args") if len(args) == 1: return run_on_executor_decorator(args[0]) elif len(args) != 0: raise ValueError("expected 1 argument, got %d", len(args)) return run_on_executor_decorator _NO_RESULT = object() def chain_future(a: "Future[_T]", b: "Future[_T]") -> None: """Chain two futures together so that when one completes, so does the other. The result (success or failure) of ``a`` will be copied to ``b``, unless ``b`` has already been completed or cancelled by the time ``a`` finishes. .. versionchanged:: 5.0 Now accepts both Tornado/asyncio `Future` objects and `concurrent.futures.Future`. 
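
    A rough usage sketch; with `asyncio.Future` objects the copy happens
    through the event loop, so ``b`` completes shortly after ``a`` rather
    than synchronously::

        a, b = Future(), Future()
        chain_future(a, b)
        a.set_result(42)  # b's result will also become 42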
""" def copy(future: "Future[_T]") -> None: assert future is a if b.done(): return if hasattr(a, "exc_info") and a.exc_info() is not None: # type: ignore future_set_exc_info(b, a.exc_info()) # type: ignore elif a.exception() is not None: b.set_exception(a.exception()) else: b.set_result(a.result()) if isinstance(a, Future): future_add_done_callback(a, copy) else: # concurrent.futures.Future from tornado.ioloop import IOLoop IOLoop.current().add_future(a, copy) def future_set_result_unless_cancelled( future: "Union[futures.Future[_T], Future[_T]]", value: _T ) -> None: """Set the given ``value`` as the `Future`'s result, if not cancelled. Avoids ``asyncio.InvalidStateError`` when calling ``set_result()`` on a cancelled `asyncio.Future`. .. versionadded:: 5.0 """ if not future.cancelled(): future.set_result(value) def future_set_exception_unless_cancelled( future: "Union[futures.Future[_T], Future[_T]]", exc: BaseException ) -> None: """Set the given ``exc`` as the `Future`'s exception. If the Future is already canceled, logs the exception instead. If this logging is not desired, the caller should explicitly check the state of the Future and call ``Future.set_exception`` instead of this wrapper. Avoids ``asyncio.InvalidStateError`` when calling ``set_exception()`` on a cancelled `asyncio.Future`. .. versionadded:: 6.0 """ if not future.cancelled(): future.set_exception(exc) else: app_log.error("Exception after Future was cancelled", exc_info=exc) def future_set_exc_info( future: "Union[futures.Future[_T], Future[_T]]", exc_info: Tuple[ Optional[type], Optional[BaseException], Optional[types.TracebackType] ], ) -> None: """Set the given ``exc_info`` as the `Future`'s exception. Understands both `asyncio.Future` and the extensions in older versions of Tornado to enable better tracebacks on Python 2. .. versionadded:: 5.0 .. versionchanged:: 6.0 If the future is already cancelled, this function is a no-op. (previously ``asyncio.InvalidStateError`` would be raised) """ if exc_info[1] is None: raise Exception("future_set_exc_info called with no exception") future_set_exception_unless_cancelled(future, exc_info[1]) @typing.overload def future_add_done_callback( future: "futures.Future[_T]", callback: Callable[["futures.Future[_T]"], None] ) -> None: pass @typing.overload # noqa: F811 def future_add_done_callback( future: "Future[_T]", callback: Callable[["Future[_T]"], None] ) -> None: pass def future_add_done_callback( # noqa: F811 future: "Union[futures.Future[_T], Future[_T]]", callback: Callable[..., None] ) -> None: """Arrange to call ``callback`` when ``future`` is complete. ``callback`` is invoked with one argument, the ``future``. If ``future`` is already done, ``callback`` is invoked immediately. This may differ from the behavior of ``Future.add_done_callback``, which makes no such guarantee. .. versionadded:: 5.0 """ if future.done(): callback(future) else: future.add_done_callback(callback) tornado-6.1.0/tornado/curl_httpclient.py000066400000000000000000000577771374705040500204430ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
# License for the specific language governing permissions and limitations
# under the License.

"""Non-blocking HTTP client implementation using pycurl."""

import collections
import functools
import logging
import pycurl
import threading
import time
from io import BytesIO

from tornado import httputil
from tornado import ioloop
from tornado.escape import utf8, native_str
from tornado.httpclient import (
    HTTPRequest,
    HTTPResponse,
    HTTPError,
    AsyncHTTPClient,
    main,
)
from tornado.log import app_log

from typing import Dict, Any, Callable, Union, Tuple, Optional
import typing

if typing.TYPE_CHECKING:
    from typing import Deque  # noqa: F401

curl_log = logging.getLogger("tornado.curl_httpclient")


class CurlAsyncHTTPClient(AsyncHTTPClient):
    def initialize(  # type: ignore
        self, max_clients: int = 10, defaults: Optional[Dict[str, Any]] = None
    ) -> None:
        super().initialize(defaults=defaults)
        # Typeshed is incomplete for CurlMulti, so just use Any for now.
        self._multi = pycurl.CurlMulti()  # type: Any
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        self._curls = [self._curl_create() for i in range(max_clients)]
        self._free_list = self._curls[:]
        self._requests = (
            collections.deque()
        )  # type: Deque[Tuple[HTTPRequest, Callable[[HTTPResponse], None], float]]
        self._fds = {}  # type: Dict[int, int]
        self._timeout = None  # type: Optional[object]

        # libcurl has bugs that sometimes cause it to not report all
        # relevant file descriptors and timeouts to TIMERFUNCTION/
        # SOCKETFUNCTION. Mitigate the effects of such bugs by
        # forcing a periodic scan of all active requests.
        self._force_timeout_callback = ioloop.PeriodicCallback(
            self._handle_force_timeout, 1000
        )
        self._force_timeout_callback.start()

        # Work around a bug in libcurl 7.29.0: Some fields in the curl
        # multi object are initialized lazily, and its destructor will
        # segfault if it is destroyed without having been used. Add
        # and remove a dummy handle to make sure everything is
        # initialized.
        dummy_curl_handle = pycurl.Curl()
        self._multi.add_handle(dummy_curl_handle)
        self._multi.remove_handle(dummy_curl_handle)

    def close(self) -> None:
        self._force_timeout_callback.stop()
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
        for curl in self._curls:
            curl.close()
        self._multi.close()
        super().close()

        # Set the properties below to None to reduce the reference count of
        # the current instance, because those properties hold some methods of
        # the current instance that would otherwise cause a circular reference.
        self._force_timeout_callback = None  # type: ignore
        self._multi = None

    def fetch_impl(
        self, request: HTTPRequest, callback: Callable[[HTTPResponse], None]
    ) -> None:
        self._requests.append((request, callback, self.io_loop.time()))
        self._process_queue()
        self._set_timeout(0)

    def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None:
        """Called by libcurl when it wants to change the file descriptors
        it cares about.
        """
        event_map = {
            pycurl.POLL_NONE: ioloop.IOLoop.NONE,
            pycurl.POLL_IN: ioloop.IOLoop.READ,
            pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
            pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE,
        }
        if event == pycurl.POLL_REMOVE:
            if fd in self._fds:
                self.io_loop.remove_handler(fd)
                del self._fds[fd]
        else:
            ioloop_event = event_map[event]
            # libcurl sometimes closes a socket and then opens a new
            # one using the same FD without giving us a POLL_NONE in
            # between.
This is a problem with the epoll IOLoop, # because the kernel can tell when a socket is closed and # removes it from the epoll automatically, causing future # update_handler calls to fail. Since we can't tell when # this has happened, always use remove and re-add # instead of update. if fd in self._fds: self.io_loop.remove_handler(fd) self.io_loop.add_handler(fd, self._handle_events, ioloop_event) self._fds[fd] = ioloop_event def _set_timeout(self, msecs: int) -> None: """Called by libcurl to schedule a timeout.""" if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = self.io_loop.add_timeout( self.io_loop.time() + msecs / 1000.0, self._handle_timeout ) def _handle_events(self, fd: int, events: int) -> None: """Called by IOLoop when there is activity on one of our file descriptors. """ action = 0 if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT while True: try: ret, num_handles = self._multi.socket_action(fd, action) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() def _handle_timeout(self) -> None: """Called by IOLoop when the requested timeout has passed.""" self._timeout = None while True: try: ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() # In theory, we shouldn't have to do this because curl will # call _set_timeout whenever the timeout changes. However, # sometimes after _handle_timeout we will need to reschedule # immediately even though nothing has changed from curl's # perspective. This is because when socket_action is # called with SOCKET_TIMEOUT, libcurl decides internally which # timeouts need to be processed by using a monotonic clock # (where available) while tornado uses python's time.time() # to decide when timeouts have occurred. When those clocks # disagree on elapsed time (as they will whenever there is an # NTP adjustment), tornado might call _handle_timeout before # libcurl is ready. After each timeout, resync the scheduled # timeout with libcurl's current state. new_timeout = self._multi.timeout() if new_timeout >= 0: self._set_timeout(new_timeout) def _handle_force_timeout(self) -> None: """Called by IOLoop periodically to ask libcurl to process any events it may have forgotten about. """ while True: try: ret, num_handles = self._multi.socket_all() except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() def _finish_pending_requests(self) -> None: """Process any requests that were completed by the last call to multi.socket_action. """ while True: num_q, ok_list, err_list = self._multi.info_read() for curl in ok_list: self._finish(curl) for curl, errnum, errmsg in err_list: self._finish(curl, errnum, errmsg) if num_q == 0: break self._process_queue() def _process_queue(self) -> None: while True: started = 0 while self._free_list and self._requests: started += 1 curl = self._free_list.pop() (request, callback, queue_start_time) = self._requests.popleft() # TODO: Don't smuggle extra data on an attribute of the Curl object. 
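                # The dict stashed on the handle travels with it through
                # libcurl, and _finish() reads it back when the transfer
                # completes, so no separate handle-to-request mapping is kept.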
curl.info = { # type: ignore "headers": httputil.HTTPHeaders(), "buffer": BytesIO(), "request": request, "callback": callback, "queue_start_time": queue_start_time, "curl_start_time": time.time(), "curl_start_ioloop_time": self.io_loop.current().time(), } try: self._curl_setup_request( curl, request, curl.info["buffer"], # type: ignore curl.info["headers"], # type: ignore ) except Exception as e: # If there was an error in setup, pass it on # to the callback. Note that allowing the # error to escape here will appear to work # most of the time since we are still in the # caller's original stack frame, but when # _process_queue() is called from # _finish_pending_requests the exceptions have # nowhere to go. self._free_list.append(curl) callback(HTTPResponse(request=request, code=599, error=e)) else: self._multi.add_handle(curl) if not started: break def _finish( self, curl: pycurl.Curl, curl_error: Optional[int] = None, curl_message: Optional[str] = None, ) -> None: info = curl.info # type: ignore curl.info = None # type: ignore self._multi.remove_handle(curl) self._free_list.append(curl) buffer = info["buffer"] if curl_error: assert curl_message is not None error = CurlError(curl_error, curl_message) # type: Optional[CurlError] assert error is not None code = error.code effective_url = None buffer.close() buffer = None else: error = None code = curl.getinfo(pycurl.HTTP_CODE) effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) buffer.seek(0) # the various curl timings are documented at # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html time_info = dict( queue=info["curl_start_ioloop_time"] - info["queue_start_time"], namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), connect=curl.getinfo(pycurl.CONNECT_TIME), appconnect=curl.getinfo(pycurl.APPCONNECT_TIME), pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), total=curl.getinfo(pycurl.TOTAL_TIME), redirect=curl.getinfo(pycurl.REDIRECT_TIME), ) try: info["callback"]( HTTPResponse( request=info["request"], code=code, headers=info["headers"], buffer=buffer, effective_url=effective_url, error=error, reason=info["headers"].get("X-Http-Reason", None), request_time=self.io_loop.time() - info["curl_start_ioloop_time"], start_time=info["curl_start_time"], time_info=time_info, ) ) except Exception: self.handle_callback_exception(info["callback"]) def handle_callback_exception(self, callback: Any) -> None: app_log.error("Exception in callback %r", callback, exc_info=True) def _curl_create(self) -> pycurl.Curl: curl = pycurl.Curl() if curl_log.isEnabledFor(logging.DEBUG): curl.setopt(pycurl.VERBOSE, 1) curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug) if hasattr( pycurl, "PROTOCOLS" ): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12) curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) return curl def _curl_setup_request( self, curl: pycurl.Curl, request: HTTPRequest, buffer: BytesIO, headers: httputil.HTTPHeaders, ) -> None: curl.setopt(pycurl.URL, native_str(request.url)) # libcurl's magic "Expect: 100-continue" behavior causes delays # with servers that don't support it (which include, among others, # Google's OpenID endpoint). Additionally, this behavior has # a bug in conjunction with the curl_multi_socket_action API # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976), # which increases the delays. 
It's more trouble than it's worth, # so just turn off the feature (yes, setting Expect: to an empty # value is the official way to disable this) if "Expect" not in request.headers: request.headers["Expect"] = "" # libcurl adds Pragma: no-cache by default; disable that too if "Pragma" not in request.headers: request.headers["Pragma"] = "" curl.setopt( pycurl.HTTPHEADER, [ "%s: %s" % (native_str(k), native_str(v)) for k, v in request.headers.get_all() ], ) curl.setopt( pycurl.HEADERFUNCTION, functools.partial( self._curl_header_callback, headers, request.header_callback ), ) if request.streaming_callback: def write_function(b: Union[bytes, bytearray]) -> int: assert request.streaming_callback is not None self.io_loop.add_callback(request.streaming_callback, b) return len(b) else: write_function = buffer.write curl.setopt(pycurl.WRITEFUNCTION, write_function) curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) curl.setopt(pycurl.MAXREDIRS, request.max_redirects) assert request.connect_timeout is not None curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) assert request.request_timeout is not None curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout)) if request.user_agent: curl.setopt(pycurl.USERAGENT, native_str(request.user_agent)) else: curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") if request.network_interface: curl.setopt(pycurl.INTERFACE, request.network_interface) if request.decompress_response: curl.setopt(pycurl.ENCODING, "gzip,deflate") else: curl.setopt(pycurl.ENCODING, None) if request.proxy_host and request.proxy_port: curl.setopt(pycurl.PROXY, request.proxy_host) curl.setopt(pycurl.PROXYPORT, request.proxy_port) if request.proxy_username: assert request.proxy_password is not None credentials = httputil.encode_username_password( request.proxy_username, request.proxy_password ) curl.setopt(pycurl.PROXYUSERPWD, credentials) if request.proxy_auth_mode is None or request.proxy_auth_mode == "basic": curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC) elif request.proxy_auth_mode == "digest": curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST) else: raise ValueError( "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode ) else: try: curl.unsetopt(pycurl.PROXY) except TypeError: # not supported, disable proxy curl.setopt(pycurl.PROXY, "") curl.unsetopt(pycurl.PROXYUSERPWD) if request.validate_cert: curl.setopt(pycurl.SSL_VERIFYPEER, 1) curl.setopt(pycurl.SSL_VERIFYHOST, 2) else: curl.setopt(pycurl.SSL_VERIFYPEER, 0) curl.setopt(pycurl.SSL_VERIFYHOST, 0) if request.ca_certs is not None: curl.setopt(pycurl.CAINFO, request.ca_certs) else: # There is no way to restore pycurl.CAINFO to its default value # (Using unsetopt makes it reject all certificates). # I don't see any way to read the default value from python so it # can be restored later. We'll have to just leave CAINFO untouched # if no ca_certs file was specified, and require that if any # request uses a custom ca_certs file, they all must. pass if request.allow_ipv6 is False: # Curl behaves reasonably when DNS resolution gives an ipv6 address # that we can't reach, so allow ipv6 unless the user asks to disable. 
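            # (IPRESOLVE_V4 restricts name resolution to IPv4 results only;
            # IPRESOLVE_WHATEVER in the else branch accepts any address
            # family the resolver returns.)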
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) else: curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) # Set the request method through curl's irritating interface which makes # up names for almost every single method curl_options = { "GET": pycurl.HTTPGET, "POST": pycurl.POST, "PUT": pycurl.UPLOAD, "HEAD": pycurl.NOBODY, } custom_methods = set(["DELETE", "OPTIONS", "PATCH"]) for o in curl_options.values(): curl.setopt(o, False) if request.method in curl_options: curl.unsetopt(pycurl.CUSTOMREQUEST) curl.setopt(curl_options[request.method], True) elif request.allow_nonstandard_methods or request.method in custom_methods: curl.setopt(pycurl.CUSTOMREQUEST, request.method) else: raise KeyError("unknown method " + request.method) body_expected = request.method in ("POST", "PATCH", "PUT") body_present = request.body is not None if not request.allow_nonstandard_methods: # Some HTTP methods nearly always have bodies while others # almost never do. Fail in this case unless the user has # opted out of sanity checks with allow_nonstandard_methods. if (body_expected and not body_present) or ( body_present and not body_expected ): raise ValueError( "Body must %sbe None for method %s (unless " "allow_nonstandard_methods is true)" % ("not " if body_expected else "", request.method) ) if body_expected or body_present: if request.method == "GET": # Even with `allow_nonstandard_methods` we disallow # GET with a body (because libcurl doesn't allow it # unless we use CUSTOMREQUEST). While the spec doesn't # forbid clients from sending a body, it arguably # disallows the server from doing anything with them. raise ValueError("Body must be None for GET request") request_buffer = BytesIO(utf8(request.body or "")) def ioctl(cmd: int) -> None: if cmd == curl.IOCMD_RESTARTREAD: # type: ignore request_buffer.seek(0) curl.setopt(pycurl.READFUNCTION, request_buffer.read) curl.setopt(pycurl.IOCTLFUNCTION, ioctl) if request.method == "POST": curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or "")) else: curl.setopt(pycurl.UPLOAD, True) curl.setopt(pycurl.INFILESIZE, len(request.body or "")) if request.auth_username is not None: assert request.auth_password is not None if request.auth_mode is None or request.auth_mode == "basic": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) elif request.auth_mode == "digest": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST) else: raise ValueError("Unsupported auth_mode %s" % request.auth_mode) userpwd = httputil.encode_username_password( request.auth_username, request.auth_password ) curl.setopt(pycurl.USERPWD, userpwd) curl_log.debug( "%s %s (username: %r)", request.method, request.url, request.auth_username, ) else: curl.unsetopt(pycurl.USERPWD) curl_log.debug("%s %s", request.method, request.url) if request.client_cert is not None: curl.setopt(pycurl.SSLCERT, request.client_cert) if request.client_key is not None: curl.setopt(pycurl.SSLKEY, request.client_key) if request.ssl_options is not None: raise ValueError("ssl_options not supported in curl_httpclient") if threading.active_count() > 1: # libcurl/pycurl is not thread-safe by default. When multiple threads # are used, signals should be disabled. This has the side effect # of disabling DNS timeouts in some environments (when libcurl is # not linked against ares), so we don't do it when there is only one # thread. 
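            # (The mechanism behind this: libcurl's non-ares resolver
            # implements DNS timeouts with SIGALRM, and signal handlers can
            # only be installed from the main thread, so NOSIGNAL disables
            # signal use entirely.)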
Applications that use many short-lived threads may need
            # to set NOSIGNAL manually in a prepare_curl_callback since
            # there may not be any other threads running at the time we call
            # threading.activeCount.
            curl.setopt(pycurl.NOSIGNAL, 1)
        if request.prepare_curl_callback is not None:
            request.prepare_curl_callback(curl)

    def _curl_header_callback(
        self,
        headers: httputil.HTTPHeaders,
        header_callback: Callable[[str], None],
        header_line_bytes: bytes,
    ) -> None:
        header_line = native_str(header_line_bytes.decode("latin1"))
        if header_callback is not None:
            self.io_loop.add_callback(header_callback, header_line)
        # header_line as returned by curl includes the end-of-line characters.
        # whitespace at the start should be preserved to allow multi-line headers
        header_line = header_line.rstrip()
        if header_line.startswith("HTTP/"):
            headers.clear()
            try:
                (__, __, reason) = httputil.parse_response_start_line(header_line)
                header_line = "X-Http-Reason: %s" % reason
            except httputil.HTTPInputError:
                return
        if not header_line:
            return
        headers.parse_line(header_line)

    def _curl_debug(self, debug_type: int, debug_msg: str) -> None:
        debug_types = ("I", "<", ">", "<", ">")
        if debug_type == 0:
            debug_msg = native_str(debug_msg)
            curl_log.debug("%s", debug_msg.strip())
        elif debug_type in (1, 2):
            debug_msg = native_str(debug_msg)
            for line in debug_msg.splitlines():
                curl_log.debug("%s %s", debug_types[debug_type], line)
        elif debug_type == 4:
            curl_log.debug("%s %r", debug_types[debug_type], debug_msg)


class CurlError(HTTPError):
    def __init__(self, errno: int, message: str) -> None:
        HTTPError.__init__(self, 599, message)
        self.errno = errno


if __name__ == "__main__":
    AsyncHTTPClient.configure(CurlAsyncHTTPClient)
    main()
tornado-6.1.0/tornado/escape.py000066400000000000000000000317231374705040500164570ustar00rootroot00000000000000#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Escaping/unescaping methods for HTML, JSON, URLs, and others.

Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""

import html.entities
import json
import re
import urllib.parse

from tornado.util import unicode_type

import typing
from typing import Union, Any, Optional, Dict, List, Callable


_XHTML_ESCAPE_RE = re.compile("[&<>\"']")
_XHTML_ESCAPE_DICT = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
    "'": "&#39;",
}


def xhtml_escape(value: Union[str, bytes]) -> str:
    """Escapes a string so it is valid within HTML or XML.

    Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
    When used in attribute values the escaped strings must be enclosed
    in quotes.

    .. versionchanged:: 3.2

       Added the single quote to the list of escaped characters.
    """
    return _XHTML_ESCAPE_RE.sub(
        lambda match: _XHTML_ESCAPE_DICT[match.group(0)], to_basestring(value)
    )


def xhtml_unescape(value: Union[str, bytes]) -> str:
    """Un-escapes an XML-escaped string."""
    return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))


# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value: Any) -> str:
    """JSON-encodes the given Python object."""
    # JSON permits but does not require forward slashes to be escaped.
    # This is useful when json data is emitted in a <script> tag
    # in HTML, as it prevents </script> tags from prematurely
    # terminating the JavaScript. Some json libraries do this escaping by
    # default, although python's standard library does not, so we do it here.
    # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
    return json.dumps(value).replace("</", "<\\/")


def json_decode(value: Union[str, bytes]) -> Any:
    """Returns Python objects for the given JSON string.

    Supports both `str` and `bytes` inputs.
    """
    return json.loads(to_basestring(value))


def squeeze(value: str) -> str:
    """Replace all sequences of whitespace chars with a single space."""
    return re.sub(r"[\x00-\x20]+", " ", value).strip()


def url_escape(value: Union[str, bytes], plus: bool = True) -> str:
    """Returns a URL-encoded version of the given value.

    If ``plus`` is true (the default), spaces will be represented as "+"
    instead of "%20". This is appropriate for query strings but not for the
    path component of a URL. Note that this default is the reverse of
    Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    quote = urllib.parse.quote_plus if plus else urllib.parse.quote
    return quote(utf8(value))


@typing.overload
def url_unescape(value: Union[str, bytes], encoding: None, plus: bool = True) -> bytes:
    pass


@typing.overload  # noqa: F811
def url_unescape(
    value: Union[str, bytes], encoding: str = "utf-8", plus: bool = True
) -> str:
    pass


def url_unescape(  # noqa: F811
    value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
) -> Union[str, bytes]:
    """Decodes the given value from a URL.

    The argument may be either a byte or unicode string.

    If encoding is None, the result will be a byte string. Otherwise,
    the result is a unicode string in the specified encoding.

    If ``plus`` is true (the default), plus signs will be interpreted as
    spaces (literal plus signs must be represented as "%2B"). This is
    appropriate for query strings and form-encoded values but not for the
    path component of a URL. Note that this default is the reverse of
    Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    if encoding is None:
        if plus:
            # unquote_to_bytes doesn't have a _plus variant
            value = to_basestring(value).replace("+", " ")
        return urllib.parse.unquote_to_bytes(value)
    else:
        unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
        return unquote(to_basestring(value), encoding=encoding)


def parse_qs_bytes(
    qs: Union[str, bytes], keep_blank_values: bool = False, strict_parsing: bool = False
) -> Dict[str, List[bytes]]:
    """Parses a query string like urlparse.parse_qs,
    but takes bytes and returns the values as byte strings.

    Keys still become type str (interpreted as latin1 in python3!)
    because it's too painful to keep them as byte strings in
    python3 and in practice they're nearly always ascii anyway.
    """
    # This is gross, but python3 doesn't give us another way.
    # Latin1 is the universal donor of character encodings.
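    # Every byte 0x00-0xff maps to exactly one latin1 code point, so
    # decode("latin1") followed by encode("latin1") round-trips arbitrary
    # bytes unchanged; that property is what "universal donor" means here.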
    if isinstance(qs, bytes):
        qs = qs.decode("latin1")
    result = urllib.parse.parse_qs(
        qs, keep_blank_values, strict_parsing, encoding="latin1", errors="strict"
    )
    encoded = {}
    for k, v in result.items():
        encoded[k] = [i.encode("latin1") for i in v]
    return encoded


_UTF8_TYPES = (bytes, type(None))


@typing.overload
def utf8(value: bytes) -> bytes:
    pass


@typing.overload  # noqa: F811
def utf8(value: str) -> bytes:
    pass


@typing.overload  # noqa: F811
def utf8(value: None) -> None:
    pass


def utf8(value: Union[None, str, bytes]) -> Optional[bytes]:  # noqa: F811
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned unchanged.
    Otherwise it must be a unicode string and is encoded as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if not isinstance(value, unicode_type):
        raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
    return value.encode("utf-8")


_TO_UNICODE_TYPES = (unicode_type, type(None))


@typing.overload
def to_unicode(value: str) -> str:
    pass


@typing.overload  # noqa: F811
def to_unicode(value: bytes) -> str:
    pass


@typing.overload  # noqa: F811
def to_unicode(value: None) -> None:
    pass


def to_unicode(value: Union[None, str, bytes]) -> Optional[str]:  # noqa: F811
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged. Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if not isinstance(value, bytes):
        raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
    return value.decode("utf-8")


# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode

# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
native_str = to_unicode
to_basestring = to_unicode


def recursive_unicode(obj: Any) -> Any:
    """Walks a simple data structure, converting byte strings to unicode.

    Supports lists, tuples, and dictionaries.
    """
    if isinstance(obj, dict):
        return dict(
            (recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()
        )
    elif isinstance(obj, list):
        return list(recursive_unicode(i) for i in obj)
    elif isinstance(obj, tuple):
        return tuple(recursive_unicode(i) for i in obj)
    elif isinstance(obj, bytes):
        return to_unicode(obj)
    else:
        return obj


# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(
    to_unicode(
        r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""  # noqa: E501
    )
)


def linkify(
    text: Union[str, bytes],
    shorten: bool = False,
    extra_params: Union[str, Callable[[str], str]] = "",
    require_protocol: bool = False,
    permitted_protocols: List[str] = ["http", "https"],
) -> str:
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.
    * ``extra_params``: Extra text to include in the link tag, or a callable
      taking the link as an argument and returning the extra text
      e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
      or::

          def extra_params_cb(url):
              if url.startswith("http://example.com"):
                  return 'class="internal"'
              else:
                  return 'class="external" rel="nofollow"'
          linkify(text, extra_params=extra_params_cb)

    * ``require_protocol``: Only linkify urls which include a protocol. If
      this is False, urls such as www.facebook.com will also be linkified.

    * ``permitted_protocols``: List (or set) of protocols which should be
      linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
      "mailto"])``. It is very unsafe to include protocols such as
      ``javascript``.
    """
    if extra_params and not callable(extra_params):
        extra_params = " " + extra_params.strip()

    def make_link(m: typing.Match) -> str:
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url  # not protocol, no linkify

        if proto and proto not in permitted_protocols:
            return url  # bad protocol, no linkify

        href = m.group(1)
        if not proto:
            href = "http://" + href  # no proto specified, use http

        if callable(extra_params):
            params = " " + extra_params(href).strip()
        else:
            params = extra_params

        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "")  # +1 for :
            else:
                proto_len = 0

            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = (
                    url[:proto_len]
                    + parts[0]
                    + "/"
                    + parts[1][:8].split("?")[0].split(".")[0]
                )

            if len(url) > max_len * 1.5:  # still too long
                url = url[:max_len]

            if url != before_clip:
                amp = url.rfind("&amp;")
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."

                if len(url) >= len(before_clip):
                    url = before_clip
                else:
                    # full url is visible on mouse-over (for those who don't
                    # have a status bar, such as Safari by default)
                    params += ' title="%s"' % href

        return u'<a href="%s"%s>%s</a>' % (href, params, url)

    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp; so
    # that we won't pick up &quot;, etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)


def _convert_entity(m: typing.Match) -> str:
    if m.group(1) == "#":
        try:
            if m.group(2)[:1].lower() == "x":
                return chr(int(m.group(2)[1:], 16))
            else:
                return chr(int(m.group(2)))
        except ValueError:
            return "&#%s;" % m.group(2)
    try:
        return _HTML_UNICODE_MAP[m.group(2)]
    except KeyError:
        return "&%s;" % m.group(2)


def _build_unicode_map() -> Dict[str, str]:
    unicode_map = {}
    for name, value in html.entities.name2codepoint.items():
        unicode_map[name] = chr(value)
    return unicode_map


_HTML_UNICODE_MAP = _build_unicode_map()
tornado-6.1.0/tornado/gen.py000066400000000000000000000743551374705040500160000ustar00rootroot00000000000000"""``tornado.gen`` implements generator-based coroutines.

.. note::

   The "decorator and generator" approach in this module is a
   precursor to native coroutines (using ``async def`` and ``await``)
   which were introduced in Python 3.5. Applications that do not
   require compatibility with older versions of Python
   should use native coroutines instead.
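   (For example, a handler method written as ``result = yield
   client.fetch(url)`` under ``@gen.coroutine`` becomes ``result = await
   client.fetch(url)`` in an ``async def`` method.)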
Some parts of this module are still useful with native coroutines, notably `multi`, `sleep`, `WaitIterator`, and `with_timeout`. Some of these functions have counterparts in the `asyncio` module which may be used as well, although the two may not necessarily be 100% compatible. Coroutines provide an easier way to work in an asynchronous environment than chaining callbacks. Code using coroutines is technically asynchronous, but it is written as a single generator instead of a collection of separate functions. For example, here's a coroutine-based handler: .. testcode:: class GenAsyncHandler(RequestHandler): @gen.coroutine def get(self): http_client = AsyncHTTPClient() response = yield http_client.fetch("http://example.com") do_something_with_response(response) self.render("template.html") .. testoutput:: :hide: Asynchronous functions in Tornado return an ``Awaitable`` or `.Future`; yielding this object returns its result. You can also yield a list or dict of other yieldable objects, which will be started at the same time and run in parallel; a list or dict of results will be returned when they are all finished: .. testcode:: @gen.coroutine def get(self): http_client = AsyncHTTPClient() response1, response2 = yield [http_client.fetch(url1), http_client.fetch(url2)] response_dict = yield dict(response3=http_client.fetch(url3), response4=http_client.fetch(url4)) response3 = response_dict['response3'] response4 = response_dict['response4'] .. testoutput:: :hide: If ``tornado.platform.twisted`` is imported, it is also possible to yield Twisted's ``Deferred`` objects. See the `convert_yielded` function to extend this mechanism. .. versionchanged:: 3.2 Dict support added. .. versionchanged:: 4.1 Support added for yielding ``asyncio`` Futures and Twisted Deferreds via ``singledispatch``. """ import asyncio import builtins import collections from collections.abc import Generator import concurrent.futures import datetime import functools from functools import singledispatch from inspect import isawaitable import sys import types from tornado.concurrent import ( Future, is_future, chain_future, future_set_exc_info, future_add_done_callback, future_set_result_unless_cancelled, ) from tornado.ioloop import IOLoop from tornado.log import app_log from tornado.util import TimeoutError try: import contextvars except ImportError: contextvars = None # type: ignore import typing from typing import Union, Any, Callable, List, Type, Tuple, Awaitable, Dict, overload if typing.TYPE_CHECKING: from typing import Sequence, Deque, Optional, Set, Iterable # noqa: F401 _T = typing.TypeVar("_T") _Yieldable = Union[ None, Awaitable, List[Awaitable], Dict[Any, Awaitable], concurrent.futures.Future ] class KeyReuseError(Exception): pass class UnknownKeyError(Exception): pass class LeakedCallbackError(Exception): pass class BadYieldError(Exception): pass class ReturnValueIgnoredError(Exception): pass def _value_from_stopiteration(e: Union[StopIteration, "Return"]) -> Any: try: # StopIteration has a value attribute beginning in py33. # So does our Return class. return e.value except AttributeError: pass try: # Cython backports coroutine functionality by putting the value in # e.args[0]. 
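        # (tornado's own Return class sets self.args = (value,) for the
        # same reason; see the Return definition later in this module.)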
return e.args[0] except (AttributeError, IndexError): return None def _create_future() -> Future: future = Future() # type: Future # Fixup asyncio debug info by removing extraneous stack entries source_traceback = getattr(future, "_source_traceback", ()) while source_traceback: # Each traceback entry is equivalent to a # (filename, self.lineno, self.name, self.line) tuple filename = source_traceback[-1][0] if filename == __file__: del source_traceback[-1] else: break return future def _fake_ctx_run(f: Callable[..., _T], *args: Any, **kw: Any) -> _T: return f(*args, **kw) @overload def coroutine( func: Callable[..., "Generator[Any, Any, _T]"] ) -> Callable[..., "Future[_T]"]: ... @overload def coroutine(func: Callable[..., _T]) -> Callable[..., "Future[_T]"]: ... def coroutine( func: Union[Callable[..., "Generator[Any, Any, _T]"], Callable[..., _T]] ) -> Callable[..., "Future[_T]"]: """Decorator for asynchronous generators. For compatibility with older versions of Python, coroutines may also "return" by raising the special exception `Return(value) `. Functions with this decorator return a `.Future`. .. warning:: When exceptions occur inside a coroutine, the exception information will be stored in the `.Future` object. You must examine the result of the `.Future` object, or the exception may go unnoticed by your code. This means yielding the function if called from another coroutine, using something like `.IOLoop.run_sync` for top-level calls, or passing the `.Future` to `.IOLoop.add_future`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ @functools.wraps(func) def wrapper(*args, **kwargs): # type: (*Any, **Any) -> Future[_T] # This function is type-annotated with a comment to work around # https://bitbucket.org/pypy/pypy/issues/2868/segfault-with-args-type-annotation-in future = _create_future() if contextvars is not None: ctx_run = contextvars.copy_context().run # type: Callable else: ctx_run = _fake_ctx_run try: result = ctx_run(func, *args, **kwargs) except (Return, StopIteration) as e: result = _value_from_stopiteration(e) except Exception: future_set_exc_info(future, sys.exc_info()) try: return future finally: # Avoid circular references future = None # type: ignore else: if isinstance(result, Generator): # Inline the first iteration of Runner.run. This lets us # avoid the cost of creating a Runner when the coroutine # never actually yields, which in turn allows us to # use "optional" coroutines in critical path code without # performance penalty for the synchronous case. try: yielded = ctx_run(next, result) except (StopIteration, Return) as e: future_set_result_unless_cancelled( future, _value_from_stopiteration(e) ) except Exception: future_set_exc_info(future, sys.exc_info()) else: # Provide strong references to Runner objects as long # as their result future objects also have strong # references (typically from the parent coroutine's # Runner). This keeps the coroutine's Runner alive. # We do this by exploiting the public API # add_done_callback() instead of putting a private # attribute on the Future. # (GitHub issues #1769, #2229). runner = Runner(ctx_run, result, future, yielded) future.add_done_callback(lambda _: runner) yielded = None try: return future finally: # Subtle memory optimization: if next() raised an exception, # the future's exc_info contains a traceback which # includes this stack frame. 
This creates a cycle, # which will be collected at the next full GC but has # been shown to greatly increase memory usage of # benchmarks (relative to the refcount-based scheme # used in the absence of cycles). We can avoid the # cycle by clearing the local variable after we return it. future = None # type: ignore future_set_result_unless_cancelled(future, result) return future wrapper.__wrapped__ = func # type: ignore wrapper.__tornado_coroutine__ = True # type: ignore return wrapper def is_coroutine_function(func: Any) -> bool: """Return whether *func* is a coroutine function, i.e. a function wrapped with `~.gen.coroutine`. .. versionadded:: 4.5 """ return getattr(func, "__tornado_coroutine__", False) class Return(Exception): """Special exception to return a value from a `coroutine`. If this exception is raised, its value argument is used as the result of the coroutine:: @gen.coroutine def fetch_json(url): response = yield AsyncHTTPClient().fetch(url) raise gen.Return(json_decode(response.body)) In Python 3.3, this exception is no longer necessary: the ``return`` statement can be used directly to return a value (previously ``yield`` and ``return`` with a value could not be combined in the same function). By analogy with the return statement, the value argument is optional, but it is never necessary to ``raise gen.Return()``. The ``return`` statement can be used with no arguments instead. """ def __init__(self, value: Any = None) -> None: super().__init__() self.value = value # Cython recognizes subclasses of StopIteration with a .args tuple. self.args = (value,) class WaitIterator(object): """Provides an iterator to yield the results of awaitables as they finish. Yielding a set of awaitables like this: ``results = yield [awaitable1, awaitable2]`` pauses the coroutine until both ``awaitable1`` and ``awaitable2`` return, and then restarts the coroutine with the results of both awaitables. If either awaitable raises an exception, the expression will raise that exception and all the results will be lost. If you need to get the result of each awaitable as soon as possible, or if you need the result of some awaitables even if others produce errors, you can use ``WaitIterator``:: wait_iterator = gen.WaitIterator(awaitable1, awaitable2) while not wait_iterator.done(): try: result = yield wait_iterator.next() except Exception as e: print("Error {} from {}".format(e, wait_iterator.current_future)) else: print("Result {} received from {} at {}".format( result, wait_iterator.current_future, wait_iterator.current_index)) Because results are returned as soon as they are available the output from the iterator *will not be in the same order as the input arguments*. If you need to know which future produced the current result, you can use the attributes ``WaitIterator.current_future``, or ``WaitIterator.current_index`` to get the index of the awaitable from the input list. (if keyword arguments were used in the construction of the `WaitIterator`, ``current_index`` will use the corresponding keyword). On Python 3.5, `WaitIterator` implements the async iterator protocol, so it can be used with the ``async for`` statement (note that in this version the entire iteration is aborted if any value raises an exception, while the previous example can continue past individual errors):: async for result in gen.WaitIterator(future1, future2): print("Result {} received from {} at {}".format( result, wait_iterator.current_future, wait_iterator.current_index)) .. versionadded:: 4.1 .. 
versionchanged:: 4.3 Added ``async for`` support in Python 3.5. """ _unfinished = {} # type: Dict[Future, Union[int, str]] def __init__(self, *args: Future, **kwargs: Future) -> None: if args and kwargs: raise ValueError("You must provide args or kwargs, not both") if kwargs: self._unfinished = dict((f, k) for (k, f) in kwargs.items()) futures = list(kwargs.values()) # type: Sequence[Future] else: self._unfinished = dict((f, i) for (i, f) in enumerate(args)) futures = args self._finished = collections.deque() # type: Deque[Future] self.current_index = None # type: Optional[Union[str, int]] self.current_future = None # type: Optional[Future] self._running_future = None # type: Optional[Future] for future in futures: future_add_done_callback(future, self._done_callback) def done(self) -> bool: """Returns True if this iterator has no more results.""" if self._finished or self._unfinished: return False # Clear the 'current' values when iteration is done. self.current_index = self.current_future = None return True def next(self) -> Future: """Returns a `.Future` that will yield the next available result. Note that this `.Future` will not be the same object as any of the inputs. """ self._running_future = Future() if self._finished: self._return_result(self._finished.popleft()) return self._running_future def _done_callback(self, done: Future) -> None: if self._running_future and not self._running_future.done(): self._return_result(done) else: self._finished.append(done) def _return_result(self, done: Future) -> None: """Called to set the returned future's state to that of the future we yielded, and to set the current future for the iterator. """ if self._running_future is None: raise Exception("no future is running") chain_future(done, self._running_future) self.current_future = done self.current_index = self._unfinished.pop(done) def __aiter__(self) -> typing.AsyncIterator: return self def __anext__(self) -> Future: if self.done(): # Lookup by name to silence pyflakes on older versions. raise getattr(builtins, "StopAsyncIteration")() return self.next() def multi( children: Union[List[_Yieldable], Dict[Any, _Yieldable]], quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (), ) -> "Union[Future[List], Future[Dict]]": """Runs multiple asynchronous operations in parallel. ``children`` may either be a list or a dict whose values are yieldable objects. ``multi()`` returns a new yieldable object that resolves to a parallel structure containing their results. If ``children`` is a list, the result is a list of results in the same order; if it is a dict, the result is a dict with the same keys. That is, ``results = yield multi(list_of_futures)`` is equivalent to:: results = [] for future in list_of_futures: results.append(yield future) If any children raise exceptions, ``multi()`` will raise the first one. All others will be logged, unless they are of types contained in the ``quiet_exceptions`` argument. In a ``yield``-based coroutine, it is not normally necessary to call this function directly, since the coroutine runner will do it automatically when a list or dict is yielded. However, it is necessary in ``await``-based coroutines, or to pass the ``quiet_exceptions`` argument. This function is available under the names ``multi()`` and ``Multi()`` for historical reasons. Cancelling a `.Future` returned by ``multi()`` does not cancel its children. `asyncio.gather` is similar to ``multi()``, but it does cancel its children. ..
versionchanged:: 4.2 If multiple yieldables fail, any exceptions after the first (which is raised) will be logged. Added the ``quiet_exceptions`` argument to suppress this logging for selected exception types. .. versionchanged:: 4.3 Replaced the class ``Multi`` and the function ``multi_future`` with a unified function ``multi``. Added support for yieldables other than ``YieldPoint`` and `.Future`. """ return multi_future(children, quiet_exceptions=quiet_exceptions) Multi = multi def multi_future( children: Union[List[_Yieldable], Dict[Any, _Yieldable]], quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (), ) -> "Union[Future[List], Future[Dict]]": """Wait for multiple asynchronous futures in parallel. Since Tornado 6.0, this function is exactly the same as `multi`. .. versionadded:: 4.0 .. versionchanged:: 4.2 If multiple ``Futures`` fail, any exceptions after the first (which is raised) will be logged. Added the ``quiet_exceptions`` argument to suppress this logging for selected exception types. .. deprecated:: 4.3 Use `multi` instead. """ if isinstance(children, dict): keys = list(children.keys()) # type: Optional[List] children_seq = children.values() # type: Iterable else: keys = None children_seq = children children_futs = list(map(convert_yielded, children_seq)) assert all(is_future(i) or isinstance(i, _NullFuture) for i in children_futs) unfinished_children = set(children_futs) future = _create_future() if not children_futs: future_set_result_unless_cancelled(future, {} if keys is not None else []) def callback(fut: Future) -> None: unfinished_children.remove(fut) if not unfinished_children: result_list = [] for f in children_futs: try: result_list.append(f.result()) except Exception as e: if future.done(): if not isinstance(e, quiet_exceptions): app_log.error( "Multiple exceptions in yield list", exc_info=True ) else: future_set_exc_info(future, sys.exc_info()) if not future.done(): if keys is not None: future_set_result_unless_cancelled( future, dict(zip(keys, result_list)) ) else: future_set_result_unless_cancelled(future, result_list) listening = set() # type: Set[Future] for f in children_futs: if f not in listening: listening.add(f) future_add_done_callback(f, callback) return future def maybe_future(x: Any) -> Future: """Converts ``x`` into a `.Future`. If ``x`` is already a `.Future`, it is simply returned; otherwise it is wrapped in a new `.Future`. This is suitable for use as ``result = yield gen.maybe_future(f())`` when you don't know whether ``f()`` returns a `.Future` or not. .. deprecated:: 4.3 This function only handles ``Futures``, not other yieldable objects. Instead of `maybe_future`, check for the non-future result types you expect (often just ``None``), and ``yield`` anything unknown. """ if is_future(x): return x else: fut = _create_future() fut.set_result(x) return fut def with_timeout( timeout: Union[float, datetime.timedelta], future: _Yieldable, quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (), ) -> Future: """Wraps a `.Future` (or other yieldable object) in a timeout. Raises `tornado.util.TimeoutError` if the input future does not complete before ``timeout``, which may be specified in any form allowed by `.IOLoop.add_timeout` (i.e. 
a `datetime.timedelta` or an absolute time relative to `.IOLoop.time`). If the wrapped `.Future` fails after it has timed out, the exception will be logged unless it is either of a type contained in ``quiet_exceptions`` (which may be an exception type or a sequence of types), or an ``asyncio.CancelledError``. The wrapped `.Future` is not canceled when the timeout expires, permitting it to be reused. `asyncio.wait_for` is similar to this function but it does cancel the wrapped `.Future` on timeout. .. versionadded:: 4.0 .. versionchanged:: 4.1 Added the ``quiet_exceptions`` argument and the logging of unhandled exceptions. .. versionchanged:: 4.4 Added support for yieldable objects other than `.Future`. .. versionchanged:: 6.0.3 ``asyncio.CancelledError`` is now always considered "quiet". """ # It's tempting to optimize this by cancelling the input future on timeout # instead of creating a new one, but A) we can't know if we are the only # one waiting on the input future, so cancelling it might disrupt other # callers and B) concurrent futures can only be cancelled while they are # in the queue, so cancellation cannot reliably bound our waiting time. future_converted = convert_yielded(future) result = _create_future() chain_future(future_converted, result) io_loop = IOLoop.current() def error_callback(future: Future) -> None: try: future.result() except asyncio.CancelledError: pass except Exception as e: if not isinstance(e, quiet_exceptions): app_log.error( "Exception in Future %r after timeout", future, exc_info=True ) def timeout_callback() -> None: if not result.done(): result.set_exception(TimeoutError("Timeout")) # In case the wrapped future goes on to fail, log it. future_add_done_callback(future_converted, error_callback) timeout_handle = io_loop.add_timeout(timeout, timeout_callback) if isinstance(future_converted, Future): # We know this future will resolve on the IOLoop, so we don't # need the extra thread-safety of IOLoop.add_future (and we also # don't care about StackContext here). future_add_done_callback( future_converted, lambda future: io_loop.remove_timeout(timeout_handle) ) else: # concurrent.futures.Futures may resolve on any thread, so we # need to route them back to the IOLoop. io_loop.add_future( future_converted, lambda future: io_loop.remove_timeout(timeout_handle) ) return result def sleep(duration: float) -> "Future[None]": """Return a `.Future` that resolves after the given number of seconds. When used with ``yield`` in a coroutine, this is a non-blocking analogue to `time.sleep` (which should not be used in coroutines because it is blocking):: yield gen.sleep(0.5) Note that calling this function on its own does nothing; you must wait on the `.Future` it returns (usually by yielding it). .. versionadded:: 4.1 """ f = _create_future() IOLoop.current().call_later( duration, lambda: future_set_result_unless_cancelled(f, None) ) return f class _NullFuture(object): """_NullFuture resembles a Future that finished with a result of None. It's not actually a `Future` to avoid depending on a particular event loop. Handled as a special case in the coroutine runner. We lie and tell the type checker that a _NullFuture is a Future so we don't have to leak _NullFuture into lots of public APIs. But this means that the type checker can't warn us when we're passing a _NullFuture into a code path that doesn't understand what to do with it. """ def result(self) -> None: return None def done(self) -> bool: return True # _null_future is used as a dummy value in the coroutine runner.
It differs # from moment in that moment always adds a delay of one IOLoop iteration # while _null_future is processed as soon as possible. _null_future = typing.cast(Future, _NullFuture()) moment = typing.cast(Future, _NullFuture()) moment.__doc__ = """A special object which may be yielded to allow the IOLoop to run for one iteration. This is not needed in normal use but it can be helpful in long-running coroutines that are likely to yield Futures that are ready instantly. Usage: ``yield gen.moment`` In native coroutines, the equivalent of ``yield gen.moment`` is ``await asyncio.sleep(0)``. .. versionadded:: 4.0 .. deprecated:: 4.5 ``yield None`` (or ``yield`` with no argument) is now equivalent to ``yield gen.moment``. """ class Runner(object): """Internal implementation of `tornado.gen.coroutine`. Maintains information about pending callbacks and their results. The results of the generator are stored in ``result_future`` (a `.Future`) """ def __init__( self, ctx_run: Callable, gen: "Generator[_Yieldable, Any, _T]", result_future: "Future[_T]", first_yielded: _Yieldable, ) -> None: self.ctx_run = ctx_run self.gen = gen self.result_future = result_future self.future = _null_future # type: Union[None, Future] self.running = False self.finished = False self.io_loop = IOLoop.current() if self.handle_yield(first_yielded): gen = result_future = first_yielded = None # type: ignore self.ctx_run(self.run) def run(self) -> None: """Starts or resumes the generator, running until it reaches a yield point that is not ready. """ if self.running or self.finished: return try: self.running = True while True: future = self.future if future is None: raise Exception("No pending future") if not future.done(): return self.future = None try: exc_info = None try: value = future.result() except Exception: exc_info = sys.exc_info() future = None if exc_info is not None: try: yielded = self.gen.throw(*exc_info) # type: ignore finally: # Break up a reference to itself # for faster GC on CPython. exc_info = None else: yielded = self.gen.send(value) except (StopIteration, Return) as e: self.finished = True self.future = _null_future future_set_result_unless_cancelled( self.result_future, _value_from_stopiteration(e) ) self.result_future = None # type: ignore return except Exception: self.finished = True self.future = _null_future future_set_exc_info(self.result_future, sys.exc_info()) self.result_future = None # type: ignore return if not self.handle_yield(yielded): return yielded = None finally: self.running = False def handle_yield(self, yielded: _Yieldable) -> bool: try: self.future = convert_yielded(yielded) except BadYieldError: self.future = Future() future_set_exc_info(self.future, sys.exc_info()) if self.future is moment: self.io_loop.add_callback(self.ctx_run, self.run) return False elif self.future is None: raise Exception("no pending future") elif not self.future.done(): def inner(f: Any) -> None: # Break a reference cycle to speed GC. f = None # noqa: F841 self.ctx_run(self.run) self.io_loop.add_future(self.future, inner) return False return True def handle_exception( self, typ: Type[Exception], value: Exception, tb: types.TracebackType ) -> bool: if not self.running and not self.finished: self.future = Future() future_set_exc_info(self.future, (typ, value, tb)) self.ctx_run(self.run) return True else: return False # Convert Awaitables into Futures. 
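# On the Python versions Tornado 6 supports, _wrap_awaitable is just
# asyncio.ensure_future: it schedules a bare awaitable (such as a native
# coroutine object) on the event loop and returns a Future for its result.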
try: _wrap_awaitable = asyncio.ensure_future except AttributeError: # asyncio.ensure_future was introduced in Python 3.4.4, but # Debian jessie still ships with 3.4.2 so try the old name. _wrap_awaitable = getattr(asyncio, "async") def convert_yielded(yielded: _Yieldable) -> Future: """Convert a yielded object into a `.Future`. The default implementation accepts lists, dictionaries, and Futures. This has the side effect of starting any coroutines that did not start themselves, similar to `asyncio.ensure_future`. If the `~functools.singledispatch` library is available, this function may be extended to support additional types. For example:: @convert_yielded.register(asyncio.Future) def _(asyncio_future): return tornado.platform.asyncio.to_tornado_future(asyncio_future) .. versionadded:: 4.1 """ if yielded is None or yielded is moment: return moment elif yielded is _null_future: return _null_future elif isinstance(yielded, (list, dict)): return multi(yielded) # type: ignore elif is_future(yielded): return typing.cast(Future, yielded) elif isawaitable(yielded): return _wrap_awaitable(yielded) # type: ignore else: raise BadYieldError("yielded unknown object %r" % (yielded,)) convert_yielded = singledispatch(convert_yielded) tornado-6.1.0/tornado/http1connection.py000066400000000000000000001064021374705040500203340ustar00rootroot00000000000000# # Copyright 2014 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Client and server implementations of HTTP/1.x. .. versionadded:: 4.0 """ import asyncio import logging import re import types from tornado.concurrent import ( Future, future_add_done_callback, future_set_result_unless_cancelled, ) from tornado.escape import native_str, utf8 from tornado import gen from tornado import httputil from tornado import iostream from tornado.log import gen_log, app_log from tornado.util import GzipDecompressor from typing import cast, Optional, Type, Awaitable, Callable, Union, Tuple class _QuietException(Exception): def __init__(self) -> None: pass class _ExceptionLoggingContext(object): """Used with the ``with`` statement when calling delegate methods to log any exceptions with the given logger. Any exceptions caught are converted to _QuietException """ def __init__(self, logger: logging.Logger) -> None: self.logger = logger def __enter__(self) -> None: pass def __exit__( self, typ: "Optional[Type[BaseException]]", value: Optional[BaseException], tb: types.TracebackType, ) -> None: if value is not None: assert typ is not None self.logger.error("Uncaught exception", exc_info=(typ, value, tb)) raise _QuietException class HTTP1ConnectionParameters(object): """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`. 
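For example, a server that should close every connection after a single request and transparently decompress gzipped request bodies might be configured like this (illustrative values, not recommendations)::

    params = HTTP1ConnectionParameters(
        no_keep_alive=True,   # one request per connection
        decompress=True,      # handle Content-Encoding: gzip
        max_body_size=10 * 1024 * 1024,
        body_timeout=60.0,
    )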
""" def __init__( self, no_keep_alive: bool = False, chunk_size: Optional[int] = None, max_header_size: Optional[int] = None, header_timeout: Optional[float] = None, max_body_size: Optional[int] = None, body_timeout: Optional[float] = None, decompress: bool = False, ) -> None: """ :arg bool no_keep_alive: If true, always close the connection after one request. :arg int chunk_size: how much data to read into memory at once :arg int max_header_size: maximum amount of data for HTTP headers :arg float header_timeout: how long to wait for all headers (seconds) :arg int max_body_size: maximum amount of data for body :arg float body_timeout: how long to wait while reading body (seconds) :arg bool decompress: if true, decode incoming ``Content-Encoding: gzip`` """ self.no_keep_alive = no_keep_alive self.chunk_size = chunk_size or 65536 self.max_header_size = max_header_size or 65536 self.header_timeout = header_timeout self.max_body_size = max_body_size self.body_timeout = body_timeout self.decompress = decompress class HTTP1Connection(httputil.HTTPConnection): """Implements the HTTP/1.x protocol. This class can be on its own for clients, or via `HTTP1ServerConnection` for servers. """ def __init__( self, stream: iostream.IOStream, is_client: bool, params: Optional[HTTP1ConnectionParameters] = None, context: Optional[object] = None, ) -> None: """ :arg stream: an `.IOStream` :arg bool is_client: client or server :arg params: a `.HTTP1ConnectionParameters` instance or ``None`` :arg context: an opaque application-defined object that can be accessed as ``connection.context``. """ self.is_client = is_client self.stream = stream if params is None: params = HTTP1ConnectionParameters() self.params = params self.context = context self.no_keep_alive = params.no_keep_alive # The body limits can be altered by the delegate, so save them # here instead of just referencing self.params later. self._max_body_size = self.params.max_body_size or self.stream.max_buffer_size self._body_timeout = self.params.body_timeout # _write_finished is set to True when finish() has been called, # i.e. there will be no more data sent. Data may still be in the # stream's write buffer. self._write_finished = False # True when we have read the entire incoming body. self._read_finished = False # _finish_future resolves when all data has been written and flushed # to the IOStream. self._finish_future = Future() # type: Future[None] # If true, the connection should be closed after this request # (after the response has been written in the server side, # and after it has been read in the client) self._disconnect_on_finish = False self._clear_callbacks() # Save the start lines after we read or write them; they # affect later processing (e.g. 304 responses and HEAD methods # have content-length but no bodies) self._request_start_line = None # type: Optional[httputil.RequestStartLine] self._response_start_line = None # type: Optional[httputil.ResponseStartLine] self._request_headers = None # type: Optional[httputil.HTTPHeaders] # True if we are writing output with chunked encoding. self._chunking_output = False # While reading a body with a content-length, this is the # amount left to read. self._expected_content_remaining = None # type: Optional[int] # A Future for our outgoing writes, returned by IOStream.write. self._pending_write = None # type: Optional[Future[None]] def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]: """Read a single HTTP response. 
Typical client-mode usage is to write a request using `write_headers`, `write`, and `finish`, and then call ``read_response``. :arg delegate: a `.HTTPMessageDelegate` Returns a `.Future` that resolves to a bool after the full response has been read. The result is true if the stream is still open. """ if self.params.decompress: delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) return self._read_message(delegate) async def _read_message(self, delegate: httputil.HTTPMessageDelegate) -> bool: need_delegate_close = False try: header_future = self.stream.read_until_regex( b"\r?\n\r?\n", max_bytes=self.params.max_header_size ) if self.params.header_timeout is None: header_data = await header_future else: try: header_data = await gen.with_timeout( self.stream.io_loop.time() + self.params.header_timeout, header_future, quiet_exceptions=iostream.StreamClosedError, ) except gen.TimeoutError: self.close() return False start_line_str, headers = self._parse_headers(header_data) if self.is_client: resp_start_line = httputil.parse_response_start_line(start_line_str) self._response_start_line = resp_start_line start_line = ( resp_start_line ) # type: Union[httputil.RequestStartLine, httputil.ResponseStartLine] # TODO: this will need to change to support client-side keepalive self._disconnect_on_finish = False else: req_start_line = httputil.parse_request_start_line(start_line_str) self._request_start_line = req_start_line self._request_headers = headers start_line = req_start_line self._disconnect_on_finish = not self._can_keep_alive( req_start_line, headers ) need_delegate_close = True with _ExceptionLoggingContext(app_log): header_recv_future = delegate.headers_received(start_line, headers) if header_recv_future is not None: await header_recv_future if self.stream is None: # We've been detached. need_delegate_close = False return False skip_body = False if self.is_client: assert isinstance(start_line, httputil.ResponseStartLine) if ( self._request_start_line is not None and self._request_start_line.method == "HEAD" ): skip_body = True code = start_line.code if code == 304: # 304 responses may include the content-length header # but do not actually have a body. # http://tools.ietf.org/html/rfc7230#section-3.3 skip_body = True if 100 <= code < 200: # 1xx responses should never indicate the presence of # a body. if "Content-Length" in headers or "Transfer-Encoding" in headers: raise httputil.HTTPInputError( "Response code %d cannot have body" % code ) # TODO: client delegates will get headers_received twice # in the case of a 100-continue. Document or change? 
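# An interim 1xx response is followed by the real response on the same # connection, so recurse here to read the message that follows it.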
await self._read_message(delegate) else: if headers.get("Expect") == "100-continue" and not self._write_finished: self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n") if not skip_body: body_future = self._read_body( resp_start_line.code if self.is_client else 0, headers, delegate ) if body_future is not None: if self._body_timeout is None: await body_future else: try: await gen.with_timeout( self.stream.io_loop.time() + self._body_timeout, body_future, quiet_exceptions=iostream.StreamClosedError, ) except gen.TimeoutError: gen_log.info("Timeout reading body from %s", self.context) self.stream.close() return False self._read_finished = True if not self._write_finished or self.is_client: need_delegate_close = False with _ExceptionLoggingContext(app_log): delegate.finish() # If we're waiting for the application to produce an asynchronous # response, and we're not detached, register a close callback # on the stream (we didn't need one while we were reading) if ( not self._finish_future.done() and self.stream is not None and not self.stream.closed() ): self.stream.set_close_callback(self._on_connection_close) await self._finish_future if self.is_client and self._disconnect_on_finish: self.close() if self.stream is None: return False except httputil.HTTPInputError as e: gen_log.info("Malformed HTTP message from %s: %s", self.context, e) if not self.is_client: await self.stream.write(b"HTTP/1.1 400 Bad Request\r\n\r\n") self.close() return False finally: if need_delegate_close: with _ExceptionLoggingContext(app_log): delegate.on_connection_close() header_future = None # type: ignore self._clear_callbacks() return True def _clear_callbacks(self) -> None: """Clears the callback attributes. This allows the request handler to be garbage collected more quickly in CPython by breaking up reference cycles. """ self._write_callback = None self._write_future = None # type: Optional[Future[None]] self._close_callback = None # type: Optional[Callable[[], None]] if self.stream is not None: self.stream.set_close_callback(None) def set_close_callback(self, callback: Optional[Callable[[], None]]) -> None: """Sets a callback that will be run when the connection is closed. Note that this callback is slightly different from `.HTTPMessageDelegate.on_connection_close`: The `.HTTPMessageDelegate` method is called when the connection is closed while receiving a message. This callback is used when there is not an active delegate (for example, on the server side this callback is used if the client closes the connection after sending its request but before receiving all of the response). """ self._close_callback = callback def _on_connection_close(self) -> None: # Note that this callback is only registered on the IOStream # when we have finished reading the request and are waiting for # the application to produce its response. if self._close_callback is not None: callback = self._close_callback self._close_callback = None callback() if not self._finish_future.done(): future_set_result_unless_cancelled(self._finish_future, None) self._clear_callbacks() def close(self) -> None: if self.stream is not None: self.stream.close() self._clear_callbacks() if not self._finish_future.done(): future_set_result_unless_cancelled(self._finish_future, None) def detach(self) -> iostream.IOStream: """Take control of the underlying stream. Returns the underlying `.IOStream` object and stops all further HTTP processing. May only be called during `.HTTPMessageDelegate.headers_received`.
Intended for implementing protocols like websockets that tunnel over an HTTP handshake. """ self._clear_callbacks() stream = self.stream self.stream = None # type: ignore if not self._finish_future.done(): future_set_result_unless_cancelled(self._finish_future, None) return stream def set_body_timeout(self, timeout: float) -> None: """Sets the body timeout for a single request. Overrides the value from `.HTTP1ConnectionParameters`. """ self._body_timeout = timeout def set_max_body_size(self, max_body_size: int) -> None: """Sets the body size limit for a single request. Overrides the value from `.HTTP1ConnectionParameters`. """ self._max_body_size = max_body_size def write_headers( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, chunk: Optional[bytes] = None, ) -> "Future[None]": """Implements `.HTTPConnection.write_headers`.""" lines = [] if self.is_client: assert isinstance(start_line, httputil.RequestStartLine) self._request_start_line = start_line lines.append(utf8("%s %s HTTP/1.1" % (start_line[0], start_line[1]))) # Client requests with a non-empty body must have either a # Content-Length or a Transfer-Encoding. self._chunking_output = ( start_line.method in ("POST", "PUT", "PATCH") and "Content-Length" not in headers and ( "Transfer-Encoding" not in headers or headers["Transfer-Encoding"] == "chunked" ) ) else: assert isinstance(start_line, httputil.ResponseStartLine) assert self._request_start_line is not None assert self._request_headers is not None self._response_start_line = start_line lines.append(utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2]))) self._chunking_output = ( # TODO: should this use # self._request_start_line.version or # start_line.version? self._request_start_line.version == "HTTP/1.1" # Omit payload header field for HEAD request. and self._request_start_line.method != "HEAD" # 1xx, 204 and 304 responses have no body (not even a zero-length # body), and so should not have either Content-Length or # Transfer-Encoding headers. and start_line.code not in (204, 304) and (start_line.code < 100 or start_line.code >= 200) # No need to chunk the output if a Content-Length is specified. and "Content-Length" not in headers # Applications are discouraged from touching Transfer-Encoding, # but if they do, leave it alone. and "Transfer-Encoding" not in headers ) # If connection to a 1.1 client will be closed, inform client if ( self._request_start_line.version == "HTTP/1.1" and self._disconnect_on_finish ): headers["Connection"] = "close" # If a 1.0 client asked for keep-alive, add the header. if ( self._request_start_line.version == "HTTP/1.0" and self._request_headers.get("Connection", "").lower() == "keep-alive" ): headers["Connection"] = "Keep-Alive" if self._chunking_output: headers["Transfer-Encoding"] = "chunked" if not self.is_client and ( self._request_start_line.method == "HEAD" or cast(httputil.ResponseStartLine, start_line).code == 304 ): self._expected_content_remaining = 0 elif "Content-Length" in headers: self._expected_content_remaining = int(headers["Content-Length"]) else: self._expected_content_remaining = None # TODO: headers are supposed to be of type str, but we still have some # cases that let bytes slip through. Remove these native_str calls when those # are fixed. 
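# latin1 maps code points 0-255 one-to-one onto single bytes, matching # the HTTP/1.x view of header fields as opaque octets.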
header_lines = ( native_str(n) + ": " + native_str(v) for n, v in headers.get_all() ) lines.extend(line.encode("latin1") for line in header_lines) for line in lines: if b"\n" in line: raise ValueError("Newline in header: " + repr(line)) future = None if self.stream.closed(): future = self._write_future = Future() future.set_exception(iostream.StreamClosedError()) future.exception() else: future = self._write_future = Future() data = b"\r\n".join(lines) + b"\r\n\r\n" if chunk: data += self._format_chunk(chunk) self._pending_write = self.stream.write(data) future_add_done_callback(self._pending_write, self._on_write_complete) return future def _format_chunk(self, chunk: bytes) -> bytes: if self._expected_content_remaining is not None: self._expected_content_remaining -= len(chunk) if self._expected_content_remaining < 0: # Close the stream now to stop further framing errors. self.stream.close() raise httputil.HTTPOutputError( "Tried to write more data than Content-Length" ) if self._chunking_output and chunk: # Don't write out empty chunks because that means END-OF-STREAM # with chunked encoding return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n" else: return chunk def write(self, chunk: bytes) -> "Future[None]": """Implements `.HTTPConnection.write`. For backwards compatibility it is allowed but deprecated to skip `write_headers` and instead call `write()` with a pre-encoded header block. """ future = None if self.stream.closed(): future = self._write_future = Future() self._write_future.set_exception(iostream.StreamClosedError()) self._write_future.exception() else: future = self._write_future = Future() self._pending_write = self.stream.write(self._format_chunk(chunk)) future_add_done_callback(self._pending_write, self._on_write_complete) return future def finish(self) -> None: """Implements `.HTTPConnection.finish`.""" if ( self._expected_content_remaining is not None and self._expected_content_remaining != 0 and not self.stream.closed() ): self.stream.close() raise httputil.HTTPOutputError( "Tried to write %d bytes less than Content-Length" % self._expected_content_remaining ) if self._chunking_output: if not self.stream.closed(): self._pending_write = self.stream.write(b"0\r\n\r\n") self._pending_write.add_done_callback(self._on_write_complete) self._write_finished = True # If the app finished the request while we're still reading, # divert any remaining data away from the delegate and # close the connection when we're done sending our response. # Closing the connection is the only way to avoid reading the # whole input body. if not self._read_finished: self._disconnect_on_finish = True # No more data is coming, so instruct TCP to send any remaining # data immediately instead of waiting for a full packet or ack. 
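# (set_nodelay(True) disables Nagle's algorithm; _finish_request turns it # back on to restore the stream's default buffering behavior.)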
self.stream.set_nodelay(True) if self._pending_write is None: self._finish_request(None) else: future_add_done_callback(self._pending_write, self._finish_request) def _on_write_complete(self, future: "Future[None]") -> None: exc = future.exception() if exc is not None and not isinstance(exc, iostream.StreamClosedError): future.result() if self._write_callback is not None: callback = self._write_callback self._write_callback = None self.stream.io_loop.add_callback(callback) if self._write_future is not None: future = self._write_future self._write_future = None future_set_result_unless_cancelled(future, None) def _can_keep_alive( self, start_line: httputil.RequestStartLine, headers: httputil.HTTPHeaders ) -> bool: if self.params.no_keep_alive: return False connection_header = headers.get("Connection") if connection_header is not None: connection_header = connection_header.lower() if start_line.version == "HTTP/1.1": return connection_header != "close" elif ( "Content-Length" in headers or headers.get("Transfer-Encoding", "").lower() == "chunked" or getattr(start_line, "method", None) in ("HEAD", "GET") ): # start_line may be a request or response start line; only # the former has a method attribute. return connection_header == "keep-alive" return False def _finish_request(self, future: "Optional[Future[None]]") -> None: self._clear_callbacks() if not self.is_client and self._disconnect_on_finish: self.close() return # Turn Nagle's algorithm back on, leaving the stream in its # default state for the next request. self.stream.set_nodelay(False) if not self._finish_future.done(): future_set_result_unless_cancelled(self._finish_future, None) def _parse_headers(self, data: bytes) -> Tuple[str, httputil.HTTPHeaders]: # The lstrip removes newlines that some implementations sometimes # insert between messages of a reused connection. Per RFC 7230, # we SHOULD ignore at least one empty line before the request. # http://tools.ietf.org/html/rfc7230#section-3.5 data_str = native_str(data.decode("latin1")).lstrip("\r\n") # RFC 7230 section 3.5 allows for both CRLF and bare LF. eol = data_str.find("\n") start_line = data_str[:eol].rstrip("\r") headers = httputil.HTTPHeaders.parse(data_str[eol:]) return start_line, headers def _read_body( self, code: int, headers: httputil.HTTPHeaders, delegate: httputil.HTTPMessageDelegate, ) -> Optional[Awaitable[None]]: if "Content-Length" in headers: if "Transfer-Encoding" in headers: # Response cannot contain both Content-Length and # Transfer-Encoding headers. # http://tools.ietf.org/html/rfc7230#section-3.3.3 raise httputil.HTTPInputError( "Response with both Transfer-Encoding and Content-Length" ) if "," in headers["Content-Length"]: # Proxies sometimes cause Content-Length headers to get # duplicated. If all the values are identical then we can # use them but if they differ it's an error. pieces = re.split(r",\s*", headers["Content-Length"]) if any(i != pieces[0] for i in pieces): raise httputil.HTTPInputError( "Multiple unequal Content-Lengths: %r" % headers["Content-Length"] ) headers["Content-Length"] = pieces[0] try: content_length = int(headers["Content-Length"]) # type: Optional[int] except ValueError: # Handles non-integer Content-Length value.
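# (e.g. "Content-Length: foo"; int() raises ValueError for such input.)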
raise httputil.HTTPInputError( "Only integer Content-Length is allowed: %s" % headers["Content-Length"] ) if cast(int, content_length) > self._max_body_size: raise httputil.HTTPInputError("Content-Length too long") else: content_length = None if code == 204: # This response code is not allowed to have a non-empty body, # and has an implicit length of zero instead of read-until-close. # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 if "Transfer-Encoding" in headers or content_length not in (None, 0): raise httputil.HTTPInputError( "Response with code %d should not have body" % code ) content_length = 0 if content_length is not None: return self._read_fixed_body(content_length, delegate) if headers.get("Transfer-Encoding", "").lower() == "chunked": return self._read_chunked_body(delegate) if self.is_client: return self._read_body_until_close(delegate) return None async def _read_fixed_body( self, content_length: int, delegate: httputil.HTTPMessageDelegate ) -> None: while content_length > 0: body = await self.stream.read_bytes( min(self.params.chunk_size, content_length), partial=True ) content_length -= len(body) if not self._write_finished or self.is_client: with _ExceptionLoggingContext(app_log): ret = delegate.data_received(body) if ret is not None: await ret async def _read_chunked_body(self, delegate: httputil.HTTPMessageDelegate) -> None: # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1 total_size = 0 while True: chunk_len_str = await self.stream.read_until(b"\r\n", max_bytes=64) chunk_len = int(chunk_len_str.strip(), 16) if chunk_len == 0: crlf = await self.stream.read_bytes(2) if crlf != b"\r\n": raise httputil.HTTPInputError( "improperly terminated chunked request" ) return total_size += chunk_len if total_size > self._max_body_size: raise httputil.HTTPInputError("chunked body too large") bytes_to_read = chunk_len while bytes_to_read: chunk = await self.stream.read_bytes( min(bytes_to_read, self.params.chunk_size), partial=True ) bytes_to_read -= len(chunk) if not self._write_finished or self.is_client: with _ExceptionLoggingContext(app_log): ret = delegate.data_received(chunk) if ret is not None: await ret # chunk ends with \r\n crlf = await self.stream.read_bytes(2) assert crlf == b"\r\n" async def _read_body_until_close( self, delegate: httputil.HTTPMessageDelegate ) -> None: body = await self.stream.read_until_close() if not self._write_finished or self.is_client: with _ExceptionLoggingContext(app_log): ret = delegate.data_received(body) if ret is not None: await ret class _GzipMessageDelegate(httputil.HTTPMessageDelegate): """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``. """ def __init__(self, delegate: httputil.HTTPMessageDelegate, chunk_size: int) -> None: self._delegate = delegate self._chunk_size = chunk_size self._decompressor = None # type: Optional[GzipDecompressor] def headers_received( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, ) -> Optional[Awaitable[None]]: if headers.get("Content-Encoding") == "gzip": self._decompressor = GzipDecompressor() # Downstream delegates will only see uncompressed data, # so rename the content-encoding header. # (but note that curl_httpclient doesn't do this). 
headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"]) del headers["Content-Encoding"] return self._delegate.headers_received(start_line, headers) async def data_received(self, chunk: bytes) -> None: if self._decompressor: compressed_data = chunk while compressed_data: decompressed = self._decompressor.decompress( compressed_data, self._chunk_size ) if decompressed: ret = self._delegate.data_received(decompressed) if ret is not None: await ret compressed_data = self._decompressor.unconsumed_tail if compressed_data and not decompressed: raise httputil.HTTPInputError( "encountered unconsumed gzip data without making progress" ) else: ret = self._delegate.data_received(chunk) if ret is not None: await ret def finish(self) -> None: if self._decompressor is not None: tail = self._decompressor.flush() if tail: # The tail should always be empty: decompress returned # all that it can in data_received and the only # purpose of the flush call is to detect errors such # as truncated input. If we did legitimately get a new # chunk at this point we'd need to change the # interface to make finish() a coroutine. raise ValueError( "decompressor.flush returned data; possible truncated input" ) return self._delegate.finish() def on_connection_close(self) -> None: return self._delegate.on_connection_close() class HTTP1ServerConnection(object): """An HTTP/1.x server.""" def __init__( self, stream: iostream.IOStream, params: Optional[HTTP1ConnectionParameters] = None, context: Optional[object] = None, ) -> None: """ :arg stream: an `.IOStream` :arg params: a `.HTTP1ConnectionParameters` or None :arg context: an opaque application-defined object that is accessible as ``connection.context`` """ self.stream = stream if params is None: params = HTTP1ConnectionParameters() self.params = params self.context = context self._serving_future = None # type: Optional[Future[None]] async def close(self) -> None: """Closes the connection. Returns a `.Future` that resolves after the serving loop has exited. """ self.stream.close() # Block until the serving loop is done, but ignore any exceptions # (start_serving is already responsible for logging them). assert self._serving_future is not None try: await self._serving_future except Exception: pass def start_serving(self, delegate: httputil.HTTPServerConnectionDelegate) -> None: """Starts serving requests on this connection. :arg delegate: a `.HTTPServerConnectionDelegate` """ assert isinstance(delegate, httputil.HTTPServerConnectionDelegate) fut = gen.convert_yielded(self._server_request_loop(delegate)) self._serving_future = fut # Register the future on the IOLoop so its errors get logged. self.stream.io_loop.add_future(fut, lambda f: f.result()) async def _server_request_loop( self, delegate: httputil.HTTPServerConnectionDelegate ) -> None: try: while True: conn = HTTP1Connection(self.stream, False, self.params, self.context) request_delegate = delegate.start_request(self, conn) try: ret = await conn.read_response(request_delegate) except ( iostream.StreamClosedError, iostream.UnsatisfiableReadError, asyncio.CancelledError, ): return except _QuietException: # This exception was already logged. conn.close() return except Exception: gen_log.error("Uncaught exception", exc_info=True) conn.close() return if not ret: return await asyncio.sleep(0) finally: delegate.on_close(self) tornado-6.1.0/tornado/httpclient.py000066400000000000000000000762571374705040500174100ustar00rootroot00000000000000"""Blocking and non-blocking HTTP client interfaces. 
This module defines a common interface shared by two implementations, ``simple_httpclient`` and ``curl_httpclient``. Applications may either instantiate their chosen implementation class directly or use the `AsyncHTTPClient` class from this module, which selects an implementation that can be overridden with the `AsyncHTTPClient.configure` method. The default implementation is ``simple_httpclient``, and this is expected to be suitable for most users' needs. However, some applications may wish to switch to ``curl_httpclient`` for reasons such as the following: * ``curl_httpclient`` has some features not found in ``simple_httpclient``, including support for HTTP proxies and the ability to use a specified network interface. * ``curl_httpclient`` is more likely to be compatible with sites that are not-quite-compliant with the HTTP spec, or sites that use little-exercised features of HTTP. * ``curl_httpclient`` is faster. Note that if you are using ``curl_httpclient``, it is highly recommended that you use a recent version of ``libcurl`` and ``pycurl``. Currently the minimum supported version of libcurl is 7.22.0, and the minimum version of pycurl is 7.18.2. It is highly recommended that your ``libcurl`` installation is built with asynchronous DNS resolver (threaded or c-ares), otherwise you may encounter various problems with request timeouts (for more information, see http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS and comments in curl_httpclient.py). To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ import datetime import functools from io import BytesIO import ssl import time import weakref from tornado.concurrent import ( Future, future_set_result_unless_cancelled, future_set_exception_unless_cancelled, ) from tornado.escape import utf8, native_str from tornado import gen, httputil from tornado.ioloop import IOLoop from tornado.util import Configurable from typing import Type, Any, Union, Dict, Callable, Optional, cast class HTTPClient(object): """A blocking HTTP client. This interface is provided to make it easier to share code between synchronous and asynchronous applications. Applications that are running an `.IOLoop` must use `AsyncHTTPClient` instead. Typical usage looks like this:: http_client = httpclient.HTTPClient() try: response = http_client.fetch("http://www.google.com/") print(response.body) except httpclient.HTTPError as e: # HTTPError is raised for non-200 responses; the response # can be found in e.response. print("Error: " + str(e)) except Exception as e: # Other errors are possible, such as IOError. print("Error: " + str(e)) http_client.close() .. versionchanged:: 5.0 Due to limitations in `asyncio`, it is no longer possible to use the synchronous ``HTTPClient`` while an `.IOLoop` is running. Use `AsyncHTTPClient` instead. """ def __init__( self, async_client_class: "Optional[Type[AsyncHTTPClient]]" = None, **kwargs: Any ) -> None: # Initialize self._closed at the beginning of the constructor # so that an exception raised here doesn't lead to confusing # failures in __del__. self._closed = True self._io_loop = IOLoop(make_current=False) if async_client_class is None: async_client_class = AsyncHTTPClient # Create the client while our IOLoop is "current", without # clobbering the thread's real current IOLoop (if any). 
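# run_sync(make_client) makes self._io_loop current only while # make_client runs, so the AsyncHTTPClient constructor binds to it.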
async def make_client() -> "AsyncHTTPClient": await gen.sleep(0) assert async_client_class is not None return async_client_class(**kwargs) self._async_client = self._io_loop.run_sync(make_client) self._closed = False def __del__(self) -> None: self.close() def close(self) -> None: """Closes the HTTPClient, freeing any resources used.""" if not self._closed: self._async_client.close() self._io_loop.close() self._closed = True def fetch( self, request: Union["HTTPRequest", str], **kwargs: Any ) -> "HTTPResponse": """Executes a request, returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` If an error occurs during the fetch, we raise an `HTTPError` unless the ``raise_error`` keyword argument is set to False. """ response = self._io_loop.run_sync( functools.partial(self._async_client.fetch, request, **kwargs) ) return response class AsyncHTTPClient(Configurable): """A non-blocking HTTP client. Example usage:: async def f(): http_client = AsyncHTTPClient() try: response = await http_client.fetch("http://www.google.com") except Exception as e: print("Error: %s" % e) else: print(response.body) The constructor for this class is magic in several respects: It actually creates an instance of an implementation-specific subclass, and instances are reused as a kind of pseudo-singleton (one per `.IOLoop`). The keyword argument ``force_instance=True`` can be used to suppress this singleton behavior. Unless ``force_instance=True`` is used, no arguments should be passed to the `AsyncHTTPClient` constructor. The implementation subclass as well as arguments to its constructor can be set with the static method `configure()`. All `AsyncHTTPClient` implementations support a ``defaults`` keyword argument, which can be used to set default values for `HTTPRequest` attributes. For example:: AsyncHTTPClient.configure( None, defaults=dict(user_agent="MyUserAgent")) # or with force_instance: client = AsyncHTTPClient(force_instance=True, defaults=dict(user_agent="MyUserAgent")) .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ _instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient] @classmethod def configurable_base(cls) -> Type[Configurable]: return AsyncHTTPClient @classmethod def configurable_default(cls) -> Type[Configurable]: from tornado.simple_httpclient import SimpleAsyncHTTPClient return SimpleAsyncHTTPClient @classmethod def _async_clients(cls) -> Dict[IOLoop, "AsyncHTTPClient"]: attr_name = "_async_client_dict_" + cls.__name__ if not hasattr(cls, attr_name): setattr(cls, attr_name, weakref.WeakKeyDictionary()) return getattr(cls, attr_name) def __new__(cls, force_instance: bool = False, **kwargs: Any) -> "AsyncHTTPClient": io_loop = IOLoop.current() if force_instance: instance_cache = None else: instance_cache = cls._async_clients() if instance_cache is not None and io_loop in instance_cache: return instance_cache[io_loop] instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs) # type: ignore # Make sure the instance knows which cache to remove itself from. # It can't simply call _async_clients() because we may be in # __new__(AsyncHTTPClient) but instance.__class__ may be # SimpleAsyncHTTPClient.
instance._instance_cache = instance_cache if instance_cache is not None: instance_cache[instance.io_loop] = instance return instance def initialize(self, defaults: Optional[Dict[str, Any]] = None) -> None: self.io_loop = IOLoop.current() self.defaults = dict(HTTPRequest._DEFAULTS) if defaults is not None: self.defaults.update(defaults) self._closed = False def close(self) -> None: """Destroys this HTTP client, freeing any file descriptors used. This method is **not needed in normal use** due to the way that `AsyncHTTPClient` objects are transparently reused. ``close()`` is generally only necessary when either the `.IOLoop` is also being closed, or the ``force_instance=True`` argument was used when creating the `AsyncHTTPClient`. No other methods may be called on the `AsyncHTTPClient` after ``close()``. """ if self._closed: return self._closed = True if self._instance_cache is not None: cached_val = self._instance_cache.pop(self.io_loop, None) # If there's an object other than self in the instance # cache for our IOLoop, something has gotten mixed up. A # value of None appears to be possible when this is called # from a destructor (HTTPClient.__del__) as the weakref # gets cleared before the destructor runs. if cached_val is not None and cached_val is not self: raise RuntimeError("inconsistent AsyncHTTPClient cache") def fetch( self, request: Union[str, "HTTPRequest"], raise_error: bool = True, **kwargs: Any ) -> "Future[HTTPResponse]": """Executes a request, asynchronously returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` This method returns a `.Future` whose result is an `HTTPResponse`. By default, the ``Future`` will raise an `HTTPError` if the request returned a non-200 response code (other errors may also be raised if the server could not be contacted). Instead, if ``raise_error`` is set to False, the response will always be returned regardless of the response code. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. The ``raise_error=False`` argument only affects the `HTTPError` raised when a non-200 response code is used, instead of suppressing all errors. """ if self._closed: raise RuntimeError("fetch() called on closed AsyncHTTPClient") if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) else: if kwargs: raise ValueError( "kwargs can't be used if request is an HTTPRequest object" ) # We may modify this (to add Host, Accept-Encoding, etc), # so make sure we don't modify the caller's object. This is also # where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers) request_proxy = _RequestProxy(request, self.defaults) future = Future() # type: Future[HTTPResponse] def handle_response(response: "HTTPResponse") -> None: if response.error: if raise_error or not response._error_is_response_code: future_set_exception_unless_cancelled(future, response.error) return future_set_result_unless_cancelled(future, response) self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response) return future def fetch_impl( self, request: "HTTPRequest", callback: Callable[["HTTPResponse"], None] ) -> None: raise NotImplementedError() @classmethod def configure( cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any ) -> None: """Configures the `AsyncHTTPClient` subclass to use. ``AsyncHTTPClient()`` actually creates an instance of a subclass. This method may be called with either a class object or the fully-qualified name of such a class (or ``None`` to use the default, ``SimpleAsyncHTTPClient``) If additional keyword arguments are given, they will be passed to the constructor of each subclass instance created. The keyword argument ``max_clients`` determines the maximum number of simultaneous `~AsyncHTTPClient.fetch()` operations that can execute in parallel on each `.IOLoop`. Additional arguments may be supported depending on the implementation class in use. Example:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ super(AsyncHTTPClient, cls).configure(impl, **kwargs) class HTTPRequest(object): """HTTP client request object.""" _headers = None # type: Union[Dict[str, str], httputil.HTTPHeaders] # Default values for HTTPRequest parameters. # Merged with the values on the request object by AsyncHTTPClient # implementations. _DEFAULTS = dict( connect_timeout=20.0, request_timeout=20.0, follow_redirects=True, max_redirects=5, decompress_response=True, proxy_password="", allow_nonstandard_methods=False, validate_cert=True, ) def __init__( self, url: str, method: str = "GET", headers: Optional[Union[Dict[str, str], httputil.HTTPHeaders]] = None, body: Optional[Union[bytes, str]] = None, auth_username: Optional[str] = None, auth_password: Optional[str] = None, auth_mode: Optional[str] = None, connect_timeout: Optional[float] = None, request_timeout: Optional[float] = None, if_modified_since: Optional[Union[float, datetime.datetime]] = None, follow_redirects: Optional[bool] = None, max_redirects: Optional[int] = None, user_agent: Optional[str] = None, use_gzip: Optional[bool] = None, network_interface: Optional[str] = None, streaming_callback: Optional[Callable[[bytes], None]] = None, header_callback: Optional[Callable[[str], None]] = None, prepare_curl_callback: Optional[Callable[[Any], None]] = None, proxy_host: Optional[str] = None, proxy_port: Optional[int] = None, proxy_username: Optional[str] = None, proxy_password: Optional[str] = None, proxy_auth_mode: Optional[str] = None, allow_nonstandard_methods: Optional[bool] = None, validate_cert: Optional[bool] = None, ca_certs: Optional[str] = None, allow_ipv6: Optional[bool] = None, client_key: Optional[str] = None, client_cert: Optional[str] = None, body_producer: Optional[ Callable[[Callable[[bytes], None]], "Future[None]"] ] = None, expect_100_continue: bool = False, decompress_response: Optional[bool] = None, ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None, ) -> None: r"""All parameters except ``url`` are optional. :arg str url: URL to fetch :arg str method: HTTP method, e.g. 
"GET" or "POST" :arg headers: Additional HTTP headers to pass on the request :type headers: `~tornado.httputil.HTTPHeaders` or `dict` :arg body: HTTP request body as a string (byte or unicode; if unicode the utf-8 encoding will be used) :type body: `str` or `bytes` :arg collections.abc.Callable body_producer: Callable used for lazy/asynchronous request bodies. It is called with one argument, a ``write`` function, and should return a `.Future`. It should call the write function with new data as it becomes available. The write function returns a `.Future` which can be used for flow control. Only one of ``body`` and ``body_producer`` may be specified. ``body_producer`` is not supported on ``curl_httpclient``. When using ``body_producer`` it is recommended to pass a ``Content-Length`` in the headers as otherwise chunked encoding will be used, and many servers do not support chunked encoding on requests. New in Tornado 4.0 :arg str auth_username: Username for HTTP authentication :arg str auth_password: Password for HTTP authentication :arg str auth_mode: Authentication mode; default is "basic". Allowed values are implementation-defined; ``curl_httpclient`` supports "basic" and "digest"; ``simple_httpclient`` only supports "basic" :arg float connect_timeout: Timeout for initial connection in seconds, default 20 seconds (0 means no timeout) :arg float request_timeout: Timeout for entire request in seconds, default 20 seconds (0 means no timeout) :arg if_modified_since: Timestamp for ``If-Modified-Since`` header :type if_modified_since: `datetime` or `float` :arg bool follow_redirects: Should redirects be followed automatically or return the 3xx response? Default True. :arg int max_redirects: Limit for ``follow_redirects``, default 5. :arg str user_agent: String to send as ``User-Agent`` header :arg bool decompress_response: Request a compressed response from the server and decompress it after downloading. Default is True. New in Tornado 4.0. :arg bool use_gzip: Deprecated alias for ``decompress_response`` since Tornado 4.0. :arg str network_interface: Network interface or source IP to use for request. See ``curl_httpclient`` note below. :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will be run with each chunk of data as it is received, and ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in the final response. :arg collections.abc.Callable header_callback: If set, ``header_callback`` will be run with each header line as it is received (including the first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line containing only ``\r\n``. All lines include the trailing newline characters). ``HTTPResponse.headers`` will be empty in the final response. This is most useful in conjunction with ``streaming_callback``, because it's the only way to get access to header data while the request is in progress. :arg collections.abc.Callable prepare_curl_callback: If set, will be called with a ``pycurl.Curl`` object to allow the application to make additional ``setopt`` calls. :arg str proxy_host: HTTP proxy hostname. To use proxies, ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``, ``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are currently only supported with ``curl_httpclient``. :arg int proxy_port: HTTP proxy port :arg str proxy_username: HTTP proxy username :arg str proxy_password: HTTP proxy password :arg str proxy_auth_mode: HTTP proxy Authentication mode; default is "basic". 
supports "basic" and "digest" :arg bool allow_nonstandard_methods: Allow unknown values for ``method`` argument? Default is False. :arg bool validate_cert: For HTTPS requests, validate the server's certificate? Default is True. :arg str ca_certs: filename of CA certificates in PEM format, or None to use defaults. See note below when used with ``curl_httpclient``. :arg str client_key: Filename for client SSL key, if any. See note below when used with ``curl_httpclient``. :arg str client_cert: Filename for client SSL certificate, if any. See note below when used with ``curl_httpclient``. :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in ``simple_httpclient`` (unsupported by ``curl_httpclient``). Overrides ``validate_cert``, ``ca_certs``, ``client_key``, and ``client_cert``. :arg bool allow_ipv6: Use IPv6 when available? Default is True. :arg bool expect_100_continue: If true, send the ``Expect: 100-continue`` header and wait for a continue response before sending the request body. Only supported with ``simple_httpclient``. .. note:: When using ``curl_httpclient`` certain options may be inherited by subsequent fetches because ``pycurl`` does not allow them to be cleanly reset. This applies to the ``ca_certs``, ``client_key``, ``client_cert``, and ``network_interface`` arguments. If you use these options, you should pass them on every request (you don't have to always use the same values, but it's not possible to mix requests that specify these options with ones that use the defaults). .. versionadded:: 3.1 The ``auth_mode`` argument. .. versionadded:: 4.0 The ``body_producer`` and ``expect_100_continue`` arguments. .. versionadded:: 4.2 The ``ssl_options`` argument. .. versionadded:: 4.5 The ``proxy_auth_mode`` argument. """ # Note that some of these attributes go through property setters # defined below. self.headers = headers # type: ignore if if_modified_since: self.headers["If-Modified-Since"] = httputil.format_timestamp( if_modified_since ) self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_username = proxy_username self.proxy_password = proxy_password self.proxy_auth_mode = proxy_auth_mode self.url = url self.method = method self.body = body # type: ignore self.body_producer = body_producer self.auth_username = auth_username self.auth_password = auth_password self.auth_mode = auth_mode self.connect_timeout = connect_timeout self.request_timeout = request_timeout self.follow_redirects = follow_redirects self.max_redirects = max_redirects self.user_agent = user_agent if decompress_response is not None: self.decompress_response = decompress_response # type: Optional[bool] else: self.decompress_response = use_gzip self.network_interface = network_interface self.streaming_callback = streaming_callback self.header_callback = header_callback self.prepare_curl_callback = prepare_curl_callback self.allow_nonstandard_methods = allow_nonstandard_methods self.validate_cert = validate_cert self.ca_certs = ca_certs self.allow_ipv6 = allow_ipv6 self.client_key = client_key self.client_cert = client_cert self.ssl_options = ssl_options self.expect_100_continue = expect_100_continue self.start_time = time.time() @property def headers(self) -> httputil.HTTPHeaders: # TODO: headers may actually be a plain dict until fairly late in # the process (AsyncHTTPClient.fetch), but practically speaking, # whenever the property is used they're already HTTPHeaders. 
return self._headers # type: ignore @headers.setter def headers(self, value: Union[Dict[str, str], httputil.HTTPHeaders]) -> None: if value is None: self._headers = httputil.HTTPHeaders() else: self._headers = value # type: ignore @property def body(self) -> bytes: return self._body @body.setter def body(self, value: Union[bytes, str]) -> None: self._body = utf8(value) class HTTPResponse(object): """HTTP Response object. Attributes: * ``request``: HTTPRequest object * ``code``: numeric HTTP status code, e.g. 200 or 404 * ``reason``: human-readable reason phrase describing the status code * ``headers``: `tornado.httputil.HTTPHeaders` object * ``effective_url``: final location of the resource after following any redirects * ``buffer``: `io.BytesIO` object for response body * ``body``: response body as bytes (created on demand from ``self.buffer``) * ``error``: Exception object, if any * ``request_time``: seconds from request start to finish. Includes all network operations from DNS resolution to receiving the last byte of data. Does not include time spent in the queue (due to the ``max_clients`` option). If redirects were followed, only includes the final request. * ``start_time``: Time at which the HTTP operation started, based on `time.time` (not the monotonic clock used by `.IOLoop.time`). May be ``None`` if the request timed out while in the queue. * ``time_info``: dictionary of diagnostic timing information from the request. Available data are subject to change, but currently uses timings available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html, plus ``queue``, which is the delay (if any) introduced by waiting for a slot under `AsyncHTTPClient`'s ``max_clients`` setting. .. versionadded:: 5.1 Added the ``start_time`` attribute. .. versionchanged:: 5.1 The ``request_time`` attribute previously included time spent in the queue for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time is excluded in both implementations. ``request_time`` is now more accurate for ``curl_httpclient`` because it uses a monotonic clock when available. """ # I'm not sure why these don't get type-inferred from the references in __init__.
error = None # type: Optional[BaseException] _error_is_response_code = False request = None # type: HTTPRequest def __init__( self, request: HTTPRequest, code: int, headers: Optional[httputil.HTTPHeaders] = None, buffer: Optional[BytesIO] = None, effective_url: Optional[str] = None, error: Optional[BaseException] = None, request_time: Optional[float] = None, time_info: Optional[Dict[str, float]] = None, reason: Optional[str] = None, start_time: Optional[float] = None, ) -> None: if isinstance(request, _RequestProxy): self.request = request.request else: self.request = request self.code = code self.reason = reason or httputil.responses.get(code, "Unknown") if headers is not None: self.headers = headers else: self.headers = httputil.HTTPHeaders() self.buffer = buffer self._body = None # type: Optional[bytes] if effective_url is None: self.effective_url = request.url else: self.effective_url = effective_url self._error_is_response_code = False if error is None: if self.code < 200 or self.code >= 300: self._error_is_response_code = True self.error = HTTPError(self.code, message=self.reason, response=self) else: self.error = None else: self.error = error self.start_time = start_time self.request_time = request_time self.time_info = time_info or {} @property def body(self) -> bytes: if self.buffer is None: return b"" elif self._body is None: self._body = self.buffer.getvalue() return self._body def rethrow(self) -> None: """If there was an error on the request, raise an `HTTPError`.""" if self.error: raise self.error def __repr__(self) -> str: args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items())) return "%s(%s)" % (self.__class__.__name__, args) class HTTPClientError(Exception): """Exception thrown for an unsuccessful HTTP request. Attributes: * ``code`` - integer HTTP error code, e.g. 404. Error code 599 is used when no HTTP response was received, e.g. for a timeout. * ``response`` - `HTTPResponse` object, if any. Note that if ``follow_redirects`` is False, redirects become HTTPErrors, and you can look at ``error.response.headers['Location']`` to see the destination of the redirect. .. versionchanged:: 5.1 Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with `tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains as an alias. """ def __init__( self, code: int, message: Optional[str] = None, response: Optional[HTTPResponse] = None, ) -> None: self.code = code self.message = message or httputil.responses.get(code, "Unknown") self.response = response super().__init__(code, message, response) def __str__(self) -> str: return "HTTP %d: %s" % (self.code, self.message) # There is a cyclic reference between self and self.response, # which breaks the default __repr__ implementation. # (especially on pypy, which doesn't have the same recursion # detection as cpython). __repr__ = __str__ HTTPError = HTTPClientError class _RequestProxy(object): """Combines an object with a dictionary of defaults. Used internally by AsyncHTTPClient implementations.
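Attribute lookups fall through to ``defaults`` only when the corresponding attribute on the wrapped request is ``None``. A minimal sketch (the ``connect_timeout`` value of 20.0 mirrors the entry in ``HTTPRequest._DEFAULTS`` above)::

    request = HTTPRequest("http://example.com/")
    proxy = _RequestProxy(request, dict(connect_timeout=20.0))
    assert proxy.url == "http://example.com/"  # set on the request itself
    assert proxy.connect_timeout == 20.0       # filled in from the defaults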
""" def __init__( self, request: HTTPRequest, defaults: Optional[Dict[str, Any]] ) -> None: self.request = request self.defaults = defaults def __getattr__(self, name: str) -> Any: request_attr = getattr(self.request, name) if request_attr is not None: return request_attr elif self.defaults is not None: return self.defaults.get(name, None) else: return None def main() -> None: from tornado.options import define, options, parse_command_line define("print_headers", type=bool, default=False) define("print_body", type=bool, default=True) define("follow_redirects", type=bool, default=True) define("validate_cert", type=bool, default=True) define("proxy_host", type=str) define("proxy_port", type=int) args = parse_command_line() client = HTTPClient() for arg in args: try: response = client.fetch( arg, follow_redirects=options.follow_redirects, validate_cert=options.validate_cert, proxy_host=options.proxy_host, proxy_port=options.proxy_port, ) except HTTPError as e: if e.response is not None: response = e.response else: raise if options.print_headers: print(response.headers) if options.print_body: print(native_str(response.body)) client.close() if __name__ == "__main__": main() tornado-6.1.0/tornado/httpserver.py000066400000000000000000000363031374705040500174240ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking, single-threaded HTTP server. Typical applications have little direct interaction with the `HTTPServer` class except to start a server at the beginning of the process (and even that is often done indirectly via `tornado.web.Application.listen`). .. versionchanged:: 4.0 The ``HTTPRequest`` class that used to live in this module has been moved to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. """ import socket import ssl from tornado.escape import native_str from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters from tornado import httputil from tornado import iostream from tornado import netutil from tornado.tcpserver import TCPServer from tornado.util import Configurable import typing from typing import Union, Any, Dict, Callable, List, Type, Tuple, Optional, Awaitable if typing.TYPE_CHECKING: from typing import Set # noqa: F401 class HTTPServer(TCPServer, Configurable, httputil.HTTPServerConnectionDelegate): r"""A non-blocking, single-threaded HTTP server. A server is defined by a subclass of `.HTTPServerConnectionDelegate`, or, for backwards compatibility, a callback that takes an `.HTTPServerRequest` as an argument. The delegate is usually a `tornado.web.Application`. `HTTPServer` supports keep-alive connections by default (automatically for HTTP/1.1, or for HTTP/1.0 when the client requests ``Connection: keep-alive``). If ``xheaders`` is ``True``, we support the ``X-Real-Ip``/``X-Forwarded-For`` and ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the remote IP and URI scheme/protocol for all requests. 
These headers are useful when running Tornado behind a reverse proxy or load balancer. The ``protocol`` argument can also be set to ``https`` if Tornado is run behind an SSL-decoding proxy that does not set one of the supported ``xheaders``. By default, when parsing the ``X-Forwarded-For`` header, Tornado will select the last (i.e., the closest) address on the list of hosts as the remote host IP address. To select the next server in the chain, a list of trusted downstream hosts may be passed as the ``trusted_downstream`` argument. These hosts will be skipped when parsing the ``X-Forwarded-For`` header. To make this server serve SSL traffic, send the ``ssl_options`` keyword argument with an `ssl.SSLContext` object. For compatibility with older versions of Python ``ssl_options`` may also be a dictionary of keyword arguments for the `ssl.wrap_socket` method.:: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), os.path.join(data_dir, "mydomain.key")) HTTPServer(application, ssl_options=ssl_ctx) `HTTPServer` initialization follows one of three patterns (the initialization methods are defined on `tornado.tcpserver.TCPServer`): 1. `~tornado.tcpserver.TCPServer.listen`: simple single-process:: server = HTTPServer(app) server.listen(8888) IOLoop.current().start() In many cases, `tornado.web.Application.listen` can be used to avoid the need to explicitly create the `HTTPServer`. 2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`: simple multi-process:: server = HTTPServer(app) server.bind(8888) server.start(0) # Forks multiple sub-processes IOLoop.current().start() When using this interface, an `.IOLoop` must *not* be passed to the `HTTPServer` constructor. `~.TCPServer.start` will always start the server on the default singleton `.IOLoop`. 3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process:: sockets = tornado.netutil.bind_sockets(8888) tornado.process.fork_processes(0) server = HTTPServer(app) server.add_sockets(sockets) IOLoop.current().start() The `~.TCPServer.add_sockets` interface is more complicated, but it can be used with `tornado.process.fork_processes` to give you more flexibility in when the fork happens. `~.TCPServer.add_sockets` can also be used in single-process servers if you want to create your listening sockets in some way other than `tornado.netutil.bind_sockets`. .. versionchanged:: 4.0 Added ``decompress_request``, ``chunk_size``, ``max_header_size``, ``idle_connection_timeout``, ``body_timeout``, ``max_body_size`` arguments. Added support for `.HTTPServerConnectionDelegate` instances as ``request_callback``. .. versionchanged:: 4.1 `.HTTPServerConnectionDelegate.start_request` is now called with two arguments ``(server_conn, request_conn)`` (in accordance with the documentation) instead of one ``(request_conn)``. .. versionchanged:: 4.2 `HTTPServer` is now a subclass of `tornado.util.Configurable`. .. versionchanged:: 4.5 Added the ``trusted_downstream`` argument. .. versionchanged:: 5.0 The ``io_loop`` argument has been removed. """ def __init__(self, *args: Any, **kwargs: Any) -> None: # Ignore args to __init__; real initialization belongs in # initialize since we're Configurable. 
(there's something # weird in initialization order between this class, # Configurable, and TCPServer so we can't leave __init__ out # completely) pass def initialize( self, request_callback: Union[ httputil.HTTPServerConnectionDelegate, Callable[[httputil.HTTPServerRequest], None], ], no_keep_alive: bool = False, xheaders: bool = False, ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None, protocol: Optional[str] = None, decompress_request: bool = False, chunk_size: Optional[int] = None, max_header_size: Optional[int] = None, idle_connection_timeout: Optional[float] = None, body_timeout: Optional[float] = None, max_body_size: Optional[int] = None, max_buffer_size: Optional[int] = None, trusted_downstream: Optional[List[str]] = None, ) -> None: # This method's signature is not extracted with autodoc # because we want its arguments to appear on the class # constructor. When changing this signature, also update the # copy in httpserver.rst. self.request_callback = request_callback self.xheaders = xheaders self.protocol = protocol self.conn_params = HTTP1ConnectionParameters( decompress=decompress_request, chunk_size=chunk_size, max_header_size=max_header_size, header_timeout=idle_connection_timeout or 3600, max_body_size=max_body_size, body_timeout=body_timeout, no_keep_alive=no_keep_alive, ) TCPServer.__init__( self, ssl_options=ssl_options, max_buffer_size=max_buffer_size, read_chunk_size=chunk_size, ) self._connections = set() # type: Set[HTTP1ServerConnection] self.trusted_downstream = trusted_downstream @classmethod def configurable_base(cls) -> Type[Configurable]: return HTTPServer @classmethod def configurable_default(cls) -> Type[Configurable]: return HTTPServer async def close_all_connections(self) -> None: """Close all open connections and asynchronously wait for them to finish. This method is used in combination with `~.TCPServer.stop` to support clean shutdowns (especially for unittests). Typical usage would call ``stop()`` first to stop accepting new connections, then ``await close_all_connections()`` to wait for existing connections to finish. This method does not currently close open websocket connections. Note that this method is a coroutine and must be called with ``await``. 
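A sketch of a typical shutdown sequence (``server`` is assumed to be a running `HTTPServer` instance)::

    async def shutdown(server):
        server.stop()  # stop accepting new connections
        await server.close_all_connections()  # let in-flight requests finish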
""" while self._connections: # Peek at an arbitrary element of the set conn = next(iter(self._connections)) await conn.close() def handle_stream(self, stream: iostream.IOStream, address: Tuple) -> None: context = _HTTPRequestContext( stream, address, self.protocol, self.trusted_downstream ) conn = HTTP1ServerConnection(stream, self.conn_params, context) self._connections.add(conn) conn.start_serving(self) def start_request( self, server_conn: object, request_conn: httputil.HTTPConnection ) -> httputil.HTTPMessageDelegate: if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate): delegate = self.request_callback.start_request(server_conn, request_conn) else: delegate = _CallableAdapter(self.request_callback, request_conn) if self.xheaders: delegate = _ProxyAdapter(delegate, request_conn) return delegate def on_close(self, server_conn: object) -> None: self._connections.remove(typing.cast(HTTP1ServerConnection, server_conn)) class _CallableAdapter(httputil.HTTPMessageDelegate): def __init__( self, request_callback: Callable[[httputil.HTTPServerRequest], None], request_conn: httputil.HTTPConnection, ) -> None: self.connection = request_conn self.request_callback = request_callback self.request = None # type: Optional[httputil.HTTPServerRequest] self.delegate = None self._chunks = [] # type: List[bytes] def headers_received( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, ) -> Optional[Awaitable[None]]: self.request = httputil.HTTPServerRequest( connection=self.connection, start_line=typing.cast(httputil.RequestStartLine, start_line), headers=headers, ) return None def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]: self._chunks.append(chunk) return None def finish(self) -> None: assert self.request is not None self.request.body = b"".join(self._chunks) self.request._parse_body() self.request_callback(self.request) def on_connection_close(self) -> None: del self._chunks class _HTTPRequestContext(object): def __init__( self, stream: iostream.IOStream, address: Tuple, protocol: Optional[str], trusted_downstream: Optional[List[str]] = None, ) -> None: self.address = address # Save the socket's address family now so we know how to # interpret self.address even after the stream is closed # and its socket attribute replaced with None. if stream.socket is not None: self.address_family = stream.socket.family else: self.address_family = None # In HTTPServerRequest we want an IP, not a full socket address. if ( self.address_family in (socket.AF_INET, socket.AF_INET6) and address is not None ): self.remote_ip = address[0] else: # Unix (or other) socket; fake the remote address. self.remote_ip = "0.0.0.0" if protocol: self.protocol = protocol elif isinstance(stream, iostream.SSLIOStream): self.protocol = "https" else: self.protocol = "http" self._orig_remote_ip = self.remote_ip self._orig_protocol = self.protocol self.trusted_downstream = set(trusted_downstream or []) def __str__(self) -> str: if self.address_family in (socket.AF_INET, socket.AF_INET6): return self.remote_ip elif isinstance(self.address, bytes): # Python 3 with the -bb option warns about str(bytes), # so convert it explicitly. # Unix socket addresses are str on mac but bytes on linux. 
return native_str(self.address) else: return str(self.address) def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None: """Rewrite the ``remote_ip`` and ``protocol`` fields.""" # Squid uses X-Forwarded-For, others use X-Real-Ip ip = headers.get("X-Forwarded-For", self.remote_ip) # Skip trusted downstream hosts in X-Forwarded-For list for ip in (cand.strip() for cand in reversed(ip.split(","))): if ip not in self.trusted_downstream: break ip = headers.get("X-Real-Ip", ip) if netutil.is_valid_ip(ip): self.remote_ip = ip # AWS uses X-Forwarded-Proto proto_header = headers.get( "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol) ) if proto_header: # use only the last proto entry if there is more than one # TODO: support trusting multiple layers of proxied protocol proto_header = proto_header.split(",")[-1].strip() if proto_header in ("http", "https"): self.protocol = proto_header def _unapply_xheaders(self) -> None: """Undo changes from `_apply_xheaders`. Xheaders are per-request so they should not leak to the next request on the same connection. """ self.remote_ip = self._orig_remote_ip self.protocol = self._orig_protocol class _ProxyAdapter(httputil.HTTPMessageDelegate): def __init__( self, delegate: httputil.HTTPMessageDelegate, request_conn: httputil.HTTPConnection, ) -> None: self.connection = request_conn self.delegate = delegate def headers_received( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, ) -> Optional[Awaitable[None]]: # TODO: either make context an official part of the # HTTPConnection interface or figure out some other way to do this. self.connection.context._apply_xheaders(headers) # type: ignore return self.delegate.headers_received(start_line, headers) def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]: return self.delegate.data_received(chunk) def finish(self) -> None: self.delegate.finish() self._cleanup() def on_connection_close(self) -> None: self.delegate.on_connection_close() self._cleanup() def _cleanup(self) -> None: self.connection.context._unapply_xheaders() # type: ignore HTTPRequest = httputil.HTTPServerRequest tornado-6.1.0/tornado/httputil.py000066400000000000000000001061351374705040500170740ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HTTP utility code shared by clients and servers. This module also defines the `HTTPServerRequest` class which is exposed via `tornado.web.RequestHandler.request`. """ import calendar import collections import copy import datetime import email.utils from functools import lru_cache from http.client import responses import http.cookies import re from ssl import SSLError import time import unicodedata from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl from tornado.escape import native_str, parse_qs_bytes, utf8 from tornado.log import gen_log from tornado.util import ObjectDict, unicode_type # responses is unused in this file, but we re-export it to other files. 
# Reference it so pyflakes doesn't complain. responses import typing from typing import ( Tuple, Iterable, List, Mapping, Iterator, Dict, Union, Optional, Awaitable, Generator, AnyStr, ) if typing.TYPE_CHECKING: from typing import Deque # noqa: F401 from asyncio import Future # noqa: F401 import unittest # noqa: F401 @lru_cache(1000) def _normalize_header(name: str) -> str: """Map a header name to Http-Header-Case. >>> _normalize_header("coNtent-TYPE") 'Content-Type' """ return "-".join([w.capitalize() for w in name.split("-")]) class HTTPHeaders(collections.abc.MutableMapping): """A dictionary that maintains ``Http-Header-Case`` for all keys. Supports multiple values per key via a pair of new methods, `add()` and `get_list()`. The regular dictionary interface returns a single value per key, with multiple values joined by a comma. >>> h = HTTPHeaders({"content-type": "text/html"}) >>> list(h.keys()) ['Content-Type'] >>> h["Content-Type"] 'text/html' >>> h.add("Set-Cookie", "A=B") >>> h.add("Set-Cookie", "C=D") >>> h["set-cookie"] 'A=B,C=D' >>> h.get_list("set-cookie") ['A=B', 'C=D'] >>> for (k,v) in sorted(h.get_all()): ... print('%s: %s' % (k,v)) ... Content-Type: text/html Set-Cookie: A=B Set-Cookie: C=D """ @typing.overload def __init__(self, __arg: Mapping[str, List[str]]) -> None: pass @typing.overload # noqa: F811 def __init__(self, __arg: Mapping[str, str]) -> None: pass @typing.overload # noqa: F811 def __init__(self, *args: Tuple[str, str]) -> None: pass @typing.overload # noqa: F811 def __init__(self, **kwargs: str) -> None: pass def __init__(self, *args: typing.Any, **kwargs: str) -> None: # noqa: F811 self._dict = {} # type: typing.Dict[str, str] self._as_list = {} # type: typing.Dict[str, typing.List[str]] self._last_key = None # type: Optional[str] if len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], HTTPHeaders): # Copy constructor for k, v in args[0].get_all(): self.add(k, v) else: # Dict-style initialization self.update(*args, **kwargs) # new public methods def add(self, name: str, value: str) -> None: """Adds a new value for the given key.""" norm_name = _normalize_header(name) self._last_key = norm_name if norm_name in self: self._dict[norm_name] = ( native_str(self[norm_name]) + "," + native_str(value) ) self._as_list[norm_name].append(value) else: self[norm_name] = value def get_list(self, name: str) -> List[str]: """Returns all values for the given header as a list.""" norm_name = _normalize_header(name) return self._as_list.get(norm_name, []) def get_all(self) -> Iterable[Tuple[str, str]]: """Returns an iterable of all (name, value) pairs. If a header has multiple values, multiple pairs will be returned with the same name. """ for name, values in self._as_list.items(): for value in values: yield (name, value) def parse_line(self, line: str) -> None: """Updates the dictionary with a single header line. >>> h = HTTPHeaders() >>> h.parse_line("Content-Type: text/html") >>> h.get('content-type') 'text/html' """ if line[0].isspace(): # continuation of a multi-line header if self._last_key is None: raise HTTPInputError("first header line cannot start with whitespace") new_part = " " + line.lstrip() self._as_list[self._last_key][-1] += new_part self._dict[self._last_key] += new_part else: try: name, value = line.split(":", 1) except ValueError: raise HTTPInputError("no colon in header line") self.add(name, value.strip()) @classmethod def parse(cls, headers: str) -> "HTTPHeaders": """Returns a dictionary from HTTP header text. 
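In addition to the simple one-line headers shown in the example below, continuation lines (lines beginning with whitespace) are folded into the value of the preceding header::

    >>> h = HTTPHeaders.parse("X-Header: one\\r\\n two\\r\\n")
    >>> h["x-header"]
    'one two'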
>>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.items()) [('Content-Length', '42'), ('Content-Type', 'text/html')] .. versionchanged:: 5.1 Raises `HTTPInputError` on malformed headers instead of a mix of `KeyError` and `ValueError`. """ h = cls() # RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line # terminator and ignore any preceding CR. for line in headers.split("\n"): if line.endswith("\r"): line = line[:-1] if line: h.parse_line(line) return h # MutableMapping abstract method implementations. def __setitem__(self, name: str, value: str) -> None: norm_name = _normalize_header(name) self._dict[norm_name] = value self._as_list[norm_name] = [value] def __getitem__(self, name: str) -> str: return self._dict[_normalize_header(name)] def __delitem__(self, name: str) -> None: norm_name = _normalize_header(name) del self._dict[norm_name] del self._as_list[norm_name] def __len__(self) -> int: return len(self._dict) def __iter__(self) -> Iterator[typing.Any]: return iter(self._dict) def copy(self) -> "HTTPHeaders": # defined in dict but not in MutableMapping. return HTTPHeaders(self) # Use our overridden copy method for the copy.copy module. # This makes shallow copies one level deeper, but preserves # the appearance that HTTPHeaders is a single container. __copy__ = copy def __str__(self) -> str: lines = [] for name, value in self.get_all(): lines.append("%s: %s\n" % (name, value)) return "".join(lines) __unicode__ = __str__ class HTTPServerRequest(object): """A single HTTP request. All attributes are of type `str` unless otherwise noted. .. attribute:: method HTTP request method, e.g. "GET" or "POST" .. attribute:: uri The requested uri. .. attribute:: path The path portion of `uri` .. attribute:: query The query portion of `uri` .. attribute:: version HTTP version specified in request, e.g. "HTTP/1.1" .. attribute:: headers `.HTTPHeaders` dictionary-like object for request headers. Acts like a case-insensitive dictionary with additional methods for repeated headers. .. attribute:: body Request body, if present, as a byte string. .. attribute:: remote_ip Client's IP address as a string. If ``HTTPServer.xheaders`` is set, will pass along the real IP address provided by a load balancer in the ``X-Real-Ip`` or ``X-Forwarded-For`` header. .. versionchanged:: 3.1 The list format of ``X-Forwarded-For`` is now supported. .. attribute:: protocol The protocol used, either "http" or "https". If ``HTTPServer.xheaders`` is set, will pass along the protocol used by a load balancer if reported via an ``X-Scheme`` header. .. attribute:: host The requested hostname, usually taken from the ``Host`` header. .. attribute:: arguments GET/POST arguments are available in the arguments property, which maps argument names to lists of values (to support multiple values for individual names). Names are of type `str`, while arguments are byte strings. Note that this is different from `.RequestHandler.get_argument`, which returns argument values as unicode strings. .. attribute:: query_arguments Same format as ``arguments``, but contains only arguments extracted from the query string. .. versionadded:: 3.2 .. attribute:: body_arguments Same format as ``arguments``, but contains only arguments extracted from the request body. .. versionadded:: 3.2 .. attribute:: files File uploads are available in the files property, which maps file names to lists of `.HTTPFile`. ..
attribute:: connection An HTTP request is attached to a single HTTP connection, which can be accessed through the "connection" attribute. Since connections are typically kept open in HTTP/1.1, multiple requests can be handled sequentially on a single connection. .. versionchanged:: 4.0 Moved from ``tornado.httpserver.HTTPRequest``. """ path = None # type: str query = None # type: str # HACK: Used for stream_request_body _body_future = None # type: Future[None] def __init__( self, method: Optional[str] = None, uri: Optional[str] = None, version: str = "HTTP/1.0", headers: Optional[HTTPHeaders] = None, body: Optional[bytes] = None, host: Optional[str] = None, files: Optional[Dict[str, List["HTTPFile"]]] = None, connection: Optional["HTTPConnection"] = None, start_line: Optional["RequestStartLine"] = None, server_connection: Optional[object] = None, ) -> None: if start_line is not None: method, uri, version = start_line self.method = method self.uri = uri self.version = version self.headers = headers or HTTPHeaders() self.body = body or b"" # set remote IP and protocol context = getattr(connection, "context", None) self.remote_ip = getattr(context, "remote_ip", None) self.protocol = getattr(context, "protocol", "http") self.host = host or self.headers.get("Host") or "127.0.0.1" self.host_name = split_host_and_port(self.host.lower())[0] self.files = files or {} self.connection = connection self.server_connection = server_connection self._start_time = time.time() self._finish_time = None if uri is not None: self.path, sep, self.query = uri.partition("?") self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) self.query_arguments = copy.deepcopy(self.arguments) self.body_arguments = {} # type: Dict[str, List[bytes]] @property def cookies(self) -> Dict[str, http.cookies.Morsel]: """A dictionary of ``http.cookies.Morsel`` objects.""" if not hasattr(self, "_cookies"): self._cookies = ( http.cookies.SimpleCookie() ) # type: http.cookies.SimpleCookie if "Cookie" in self.headers: try: parsed = parse_cookie(self.headers["Cookie"]) except Exception: pass else: for k, v in parsed.items(): try: self._cookies[k] = v except Exception: # SimpleCookie imposes some restrictions on keys; # parse_cookie does not. Discard any cookies # with disallowed keys. pass return self._cookies def full_url(self) -> str: """Reconstructs the full URL for this request.""" return self.protocol + "://" + self.host + self.uri def request_time(self) -> float: """Returns the amount of time it took for this request to execute.""" if self._finish_time is None: return time.time() - self._start_time else: return self._finish_time - self._start_time def get_ssl_certificate( self, binary_form: bool = False ) -> Union[None, Dict, bytes]: """Returns the client's SSL certificate, if any. To use client certificates, the HTTPServer's `ssl.SSLContext.verify_mode` field must be set, e.g.:: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain("foo.crt", "foo.key") ssl_ctx.load_verify_locations("cacerts.pem") ssl_ctx.verify_mode = ssl.CERT_REQUIRED server = HTTPServer(app, ssl_options=ssl_ctx) By default, the return value is a dictionary (or None, if no client certificate is present). If ``binary_form`` is true, a DER-encoded form of the certificate is returned instead. See SSLSocket.getpeercert() in the standard library for more details. 
http://docs.python.org/library/ssl.html#sslsocket-objects """ try: if self.connection is None: return None # TODO: add a method to HTTPConnection for this so it can work with HTTP/2 return self.connection.stream.socket.getpeercert( # type: ignore binary_form=binary_form ) except SSLError: return None def _parse_body(self) -> None: parse_body_arguments( self.headers.get("Content-Type", ""), self.body, self.body_arguments, self.files, self.headers, ) for k, v in self.body_arguments.items(): self.arguments.setdefault(k, []).extend(v) def __repr__(self) -> str: attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) return "%s(%s)" % (self.__class__.__name__, args) class HTTPInputError(Exception): """Exception class for malformed HTTP requests or responses from remote sources. .. versionadded:: 4.0 """ pass class HTTPOutputError(Exception): """Exception class for errors in HTTP output. .. versionadded:: 4.0 """ pass class HTTPServerConnectionDelegate(object): """Implement this interface to handle requests from `.HTTPServer`. .. versionadded:: 4.0 """ def start_request( self, server_conn: object, request_conn: "HTTPConnection" ) -> "HTTPMessageDelegate": """This method is called by the server when a new request has started. :arg server_conn: is an opaque object representing the long-lived (e.g. tcp-level) connection. :arg request_conn: is a `.HTTPConnection` object for a single request/response exchange. This method should return a `.HTTPMessageDelegate`. """ raise NotImplementedError() def on_close(self, server_conn: object) -> None: """This method is called when a connection has been closed. :arg server_conn: is a server connection that has previously been passed to ``start_request``. """ pass class HTTPMessageDelegate(object): """Implement this interface to handle an HTTP request or response. .. versionadded:: 4.0 """ # TODO: genericize this class to avoid exposing the Union. def headers_received( self, start_line: Union["RequestStartLine", "ResponseStartLine"], headers: HTTPHeaders, ) -> Optional[Awaitable[None]]: """Called when the HTTP headers have been received and parsed. :arg start_line: a `.RequestStartLine` or `.ResponseStartLine` depending on whether this is a client or server message. :arg headers: a `.HTTPHeaders` instance. Some `.HTTPConnection` methods can only be called during ``headers_received``. May return a `.Future`; if it does the body will not be read until it is done. """ pass def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]: """Called when a chunk of data has been received. May return a `.Future` for flow control. """ pass def finish(self) -> None: """Called after the last chunk of data has been received.""" pass def on_connection_close(self) -> None: """Called if the connection is closed without finishing the request. If ``headers_received`` is called, either ``finish`` or ``on_connection_close`` will be called, but not both. """ pass class HTTPConnection(object): """Applications use this interface to write their responses. .. versionadded:: 4.0 """ def write_headers( self, start_line: Union["RequestStartLine", "ResponseStartLine"], headers: HTTPHeaders, chunk: Optional[bytes] = None, ) -> "Future[None]": """Write an HTTP header block. :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`. :arg headers: a `.HTTPHeaders` instance. :arg chunk: the first (optional) chunk of data. 
This is an optimization so that small responses can be written in the same call as their headers. The ``version`` field of ``start_line`` is ignored. Returns a future for flow control. .. versionchanged:: 6.0 The ``callback`` argument was removed. """ raise NotImplementedError() def write(self, chunk: bytes) -> "Future[None]": """Writes a chunk of body data. Returns a future for flow control. .. versionchanged:: 6.0 The ``callback`` argument was removed. """ raise NotImplementedError() def finish(self) -> None: """Indicates that the last body data has been written.""" raise NotImplementedError() def url_concat( url: str, args: Union[ None, Dict[str, str], List[Tuple[str, str]], Tuple[Tuple[str, str], ...] ], ) -> str: """Concatenate url and arguments regardless of whether url has existing query parameters. ``args`` may be either a dictionary or a list of key-value pairs (the latter allows for multiple values with the same key). >>> url_concat("http://example.com/foo", dict(c="d")) 'http://example.com/foo?c=d' >>> url_concat("http://example.com/foo?a=b", dict(c="d")) 'http://example.com/foo?a=b&c=d' >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")]) 'http://example.com/foo?a=b&c=d&c=d2' """ if args is None: return url parsed_url = urlparse(url) if isinstance(args, dict): parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True) parsed_query.extend(args.items()) elif isinstance(args, list) or isinstance(args, tuple): parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True) parsed_query.extend(args) else: err = "'args' parameter should be dict, list or tuple. Not {0}".format( type(args) ) raise TypeError(err) final_query = urlencode(parsed_query) url = urlunparse( ( parsed_url[0], parsed_url[1], parsed_url[2], parsed_url[3], final_query, parsed_url[5], ) ) return url class HTTPFile(ObjectDict): """Represents a file uploaded via a form. For backwards compatibility, its instance attributes are also accessible as dictionary keys. * ``filename`` * ``body`` * ``content_type`` """ pass def _parse_request_range( range_header: str, ) -> Optional[Tuple[Optional[int], Optional[int]]]: """Parses a Range header. Returns either ``None`` or tuple ``(start, end)``. Note that while the HTTP headers use inclusive byte positions, this method returns indexes suitable for use in slices. >>> start, end = _parse_request_range("bytes=1-2") >>> start, end (1, 3) >>> [0, 1, 2, 3, 4][start:end] [1, 2] >>> _parse_request_range("bytes=6-") (6, None) >>> _parse_request_range("bytes=-6") (-6, None) >>> _parse_request_range("bytes=-0") (None, 0) >>> _parse_request_range("bytes=") (None, None) >>> _parse_request_range("foo=42") >>> _parse_request_range("bytes=1-2,6-10") Note: only supports one range (e.g., ``bytes=1-2,6-10`` is not allowed). See [0] for the details of the range header.
[0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges """ unit, _, value = range_header.partition("=") unit, value = unit.strip(), value.strip() if unit != "bytes": return None start_b, _, end_b = value.partition("-") try: start = _int_or_none(start_b) end = _int_or_none(end_b) except ValueError: return None if end is not None: if start is None: if end != 0: start = -end end = None else: end += 1 return (start, end) def _get_content_range(start: Optional[int], end: Optional[int], total: int) -> str: """Returns a suitable Content-Range header: >>> print(_get_content_range(None, 1, 4)) bytes 0-0/4 >>> print(_get_content_range(1, 3, 4)) bytes 1-2/4 >>> print(_get_content_range(None, None, 4)) bytes 0-3/4 """ start = start or 0 end = (end or total) - 1 return "bytes %s-%s/%s" % (start, end, total) def _int_or_none(val: str) -> Optional[int]: val = val.strip() if val == "": return None return int(val) def parse_body_arguments( content_type: str, body: bytes, arguments: Dict[str, List[bytes]], files: Dict[str, List[HTTPFile]], headers: Optional[HTTPHeaders] = None, ) -> None: """Parses a form request body. Supports ``application/x-www-form-urlencoded`` and ``multipart/form-data``. The ``content_type`` parameter should be a string and ``body`` should be a byte string. The ``arguments`` and ``files`` parameters are dictionaries that will be updated with the parsed contents. """ if content_type.startswith("application/x-www-form-urlencoded"): if headers and "Content-Encoding" in headers: gen_log.warning( "Unsupported Content-Encoding: %s", headers["Content-Encoding"] ) return try: # real charset decoding will happen in RequestHandler.decode_argument() uri_arguments = parse_qs_bytes(body, keep_blank_values=True) except Exception as e: gen_log.warning("Invalid x-www-form-urlencoded body: %s", e) uri_arguments = {} for name, values in uri_arguments.items(): if values: arguments.setdefault(name, []).extend(values) elif content_type.startswith("multipart/form-data"): if headers and "Content-Encoding" in headers: gen_log.warning( "Unsupported Content-Encoding: %s", headers["Content-Encoding"] ) return try: fields = content_type.split(";") for field in fields: k, sep, v = field.strip().partition("=") if k == "boundary" and v: parse_multipart_form_data(utf8(v), body, arguments, files) break else: raise ValueError("multipart boundary not found") except Exception as e: gen_log.warning("Invalid multipart/form-data: %s", e) def parse_multipart_form_data( boundary: bytes, data: bytes, arguments: Dict[str, List[bytes]], files: Dict[str, List[HTTPFile]], ) -> None: """Parses a ``multipart/form-data`` body. The ``boundary`` and ``data`` parameters are both byte strings. The dictionaries given in the arguments and files parameters will be updated with the contents of the body. .. versionchanged:: 5.1 Now recognizes non-ASCII filenames in RFC 2231/5987 (``filename*=``) format. """ # The standard allows for the boundary to be quoted in the header, # although it's rare (it happens at least for google app engine # xmpp). I think we're also supposed to handle backslash-escapes # here but I'll save that until we see a client that uses them # in the wild. 
if boundary.startswith(b'"') and boundary.endswith(b'"'): boundary = boundary[1:-1] final_boundary_index = data.rfind(b"--" + boundary + b"--") if final_boundary_index == -1: gen_log.warning("Invalid multipart/form-data: no final boundary") return parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n") for part in parts: if not part: continue eoh = part.find(b"\r\n\r\n") if eoh == -1: gen_log.warning("multipart/form-data missing headers") continue headers = HTTPHeaders.parse(part[:eoh].decode("utf-8")) disp_header = headers.get("Content-Disposition", "") disposition, disp_params = _parse_header(disp_header) if disposition != "form-data" or not part.endswith(b"\r\n"): gen_log.warning("Invalid multipart/form-data") continue value = part[eoh + 4 : -2] if not disp_params.get("name"): gen_log.warning("multipart/form-data value missing name") continue name = disp_params["name"] if disp_params.get("filename"): ctype = headers.get("Content-Type", "application/unknown") files.setdefault(name, []).append( HTTPFile( filename=disp_params["filename"], body=value, content_type=ctype ) ) else: arguments.setdefault(name, []).append(value) def format_timestamp( ts: Union[int, float, tuple, time.struct_time, datetime.datetime] ) -> str: """Formats a timestamp in the format used by HTTP. The argument may be a numeric timestamp as returned by `time.time`, a time tuple as returned by `time.gmtime`, or a `datetime.datetime` object. >>> format_timestamp(1359312200) 'Sun, 27 Jan 2013 18:43:20 GMT' """ if isinstance(ts, (int, float)): time_num = ts elif isinstance(ts, (tuple, time.struct_time)): time_num = calendar.timegm(ts) elif isinstance(ts, datetime.datetime): time_num = calendar.timegm(ts.utctimetuple()) else: raise TypeError("unknown timestamp type: %r" % ts) return email.utils.formatdate(time_num, usegmt=True) RequestStartLine = collections.namedtuple( "RequestStartLine", ["method", "path", "version"] ) _http_version_re = re.compile(r"^HTTP/1\.[0-9]$") def parse_request_start_line(line: str) -> RequestStartLine: """Returns a (method, path, version) tuple for an HTTP 1.x request line. The response is a `collections.namedtuple`. >>> parse_request_start_line("GET /foo HTTP/1.1") RequestStartLine(method='GET', path='/foo', version='HTTP/1.1') """ try: method, path, version = line.split(" ") except ValueError: # https://tools.ietf.org/html/rfc7230#section-3.1.1 # invalid request-line SHOULD respond with a 400 (Bad Request) raise HTTPInputError("Malformed HTTP request line") if not _http_version_re.match(version): raise HTTPInputError( "Malformed HTTP version in HTTP Request-Line: %r" % version ) return RequestStartLine(method, path, version) ResponseStartLine = collections.namedtuple( "ResponseStartLine", ["version", "code", "reason"] ) _http_response_line_re = re.compile(r"(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)") def parse_response_start_line(line: str) -> ResponseStartLine: """Returns a (version, code, reason) tuple for an HTTP 1.x response line. The response is a `collections.namedtuple`. 
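Reason phrases may contain spaces, and a line that cannot be parsed raises `HTTPInputError`. Another example, alongside the one below::

    >>> parse_response_start_line("HTTP/1.1 404 Not Found")
    ResponseStartLine(version='HTTP/1.1', code=404, reason='Not Found')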
>>> parse_response_start_line("HTTP/1.1 200 OK") ResponseStartLine(version='HTTP/1.1', code=200, reason='OK') """ line = native_str(line) match = _http_response_line_re.match(line) if not match: raise HTTPInputError("Error parsing response start line") return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3)) # _parseparam and _parse_header are copied and modified from python2.7's cgi.py # The original 2.7 version of this code did not correctly support some # combinations of semicolons and double quotes. # It has also been modified to support valueless parameters as seen in # websocket extension negotiations, and to support non-ascii values in # RFC 2231/5987 format. def _parseparam(s: str) -> Generator[str, None, None]: while s[:1] == ";": s = s[1:] end = s.find(";") while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: end = s.find(";", end + 1) if end < 0: end = len(s) f = s[:end] yield f.strip() s = s[end:] def _parse_header(line: str) -> Tuple[str, Dict[str, str]]: r"""Parse a Content-type like header. Return the main content-type and a dictionary of options. >>> d = "form-data; foo=\"b\\\\a\\\"r\"; file*=utf-8''T%C3%A4st" >>> ct, d = _parse_header(d) >>> ct 'form-data' >>> d['file'] == r'T\u00e4st'.encode('ascii').decode('unicode_escape') True >>> d['foo'] 'b\\a"r' """ parts = _parseparam(";" + line) key = next(parts) # decode_params treats first argument special, but we already stripped key params = [("Dummy", "value")] for p in parts: i = p.find("=") if i >= 0: name = p[:i].strip().lower() value = p[i + 1 :].strip() params.append((name, native_str(value))) decoded_params = email.utils.decode_params(params) decoded_params.pop(0) # get rid of the dummy again pdict = {} for name, decoded_value in decoded_params: value = email.utils.collapse_rfc2231_value(decoded_value) if len(value) >= 2 and value[0] == '"' and value[-1] == '"': value = value[1:-1] pdict[name] = value return key, pdict def _encode_header(key: str, pdict: Dict[str, str]) -> str: """Inverse of _parse_header. >>> _encode_header('permessage-deflate', ... {'client_max_window_bits': 15, 'client_no_context_takeover': None}) 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover' """ if not pdict: return key out = [key] # Sort the parameters just to make it easy to test. for k, v in sorted(pdict.items()): if v is None: out.append(k) else: # TODO: quote if necessary. out.append("%s=%s" % (k, v)) return "; ".join(out) def encode_username_password( username: Union[str, bytes], password: Union[str, bytes] ) -> bytes: """Encodes a username/password pair in the format used by HTTP auth. The return value is a byte string in the form ``username:password``. .. versionadded:: 5.1 """ if isinstance(username, unicode_type): username = unicodedata.normalize("NFC", username) if isinstance(password, unicode_type): password = unicodedata.normalize("NFC", password) return utf8(username) + b":" + utf8(password) def doctests(): # type: () -> unittest.TestSuite import doctest return doctest.DocTestSuite() _netloc_re = re.compile(r"^(.+):(\d+)$") def split_host_and_port(netloc: str) -> Tuple[str, Optional[int]]: """Returns ``(host, port)`` tuple from ``netloc``. Returned ``port`` will be ``None`` if not present. .. 
versionadded:: 4.1 """ match = _netloc_re.match(netloc) if match: host = match.group(1) port = int(match.group(2)) # type: Optional[int] else: host = netloc port = None return (host, port) def qs_to_qsl(qs: Dict[str, List[AnyStr]]) -> Iterable[Tuple[str, AnyStr]]: """Generator converting a result of ``parse_qs`` back to name-value pairs. .. versionadded:: 5.0 """ for k, vs in qs.items(): for v in vs: yield (k, v) _OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") _QuotePatt = re.compile(r"[\\].") _nulljoin = "".join def _unquote_cookie(s: str) -> str: """Handle double quotes and escaping in cookie values. This method is copied verbatim from the Python 3.5 standard library (http.cookies._unquote) so we don't have to depend on non-public interfaces. """ # If there aren't any doublequotes, # then there can't be any special characters. See RFC 2109. if s is None or len(s) < 2: return s if s[0] != '"' or s[-1] != '"': return s # We have to assume that we must decode this string. # Down to work. # Remove the "s s = s[1:-1] # Check for special sequences. Examples: # \012 --> \n # \" --> " # i = 0 n = len(s) res = [] while 0 <= i < n: o_match = _OctalPatt.search(s, i) q_match = _QuotePatt.search(s, i) if not o_match and not q_match: # Neither matched res.append(s[i:]) break # else: j = k = -1 if o_match: j = o_match.start(0) if q_match: k = q_match.start(0) if q_match and (not o_match or k < j): # QuotePatt matched res.append(s[i:k]) res.append(s[k + 1]) i = k + 2 else: # OctalPatt matched res.append(s[i:j]) res.append(chr(int(s[j + 1 : j + 4], 8))) i = j + 4 return _nulljoin(res) def parse_cookie(cookie: str) -> Dict[str, str]: """Parse a ``Cookie`` HTTP header into a dict of name/value pairs. This function attempts to mimic browser cookie parsing behavior; it specifically does not follow any of the cookie-related RFCs (because browsers don't either). The algorithm used is identical to that used by Django version 1.9.10. .. versionadded:: 4.4.2 """ cookiedict = {} for chunk in cookie.split(str(";")): if str("=") in chunk: key, val = chunk.split(str("="), 1) else: # Assume an empty name per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 key, val = str(""), chunk key, val = key.strip(), val.strip() if key or val: # unquote using Python's algorithm. cookiedict[key] = _unquote_cookie(val) return cookiedict tornado-6.1.0/tornado/ioloop.py000066400000000000000000001050161374705040500165150ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """An I/O event loop for non-blocking sockets. In Tornado 6.0, `.IOLoop` is a wrapper around the `asyncio` event loop, with a slightly different interface for historical reasons. Applications can use either the `.IOLoop` interface or the underlying `asyncio` event loop directly (unless compatibility with older versions of Tornado is desired, in which case `.IOLoop` must be used). Typical applications will use a single `IOLoop` object, accessed via `IOLoop.current` class method. 
The `IOLoop.start` method (or equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually be called at the end of the ``main()`` function. Atypical applications may use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` case. """ import asyncio import concurrent.futures import datetime import functools import logging import numbers import os import sys import time import math import random from tornado.concurrent import ( Future, is_future, chain_future, future_set_exc_info, future_add_done_callback, ) from tornado.log import app_log from tornado.util import Configurable, TimeoutError, import_object import typing from typing import Union, Any, Type, Optional, Callable, TypeVar, Tuple, Awaitable if typing.TYPE_CHECKING: from typing import Dict, List # noqa: F401 from typing_extensions import Protocol else: Protocol = object class _Selectable(Protocol): def fileno(self) -> int: pass def close(self) -> None: pass _T = TypeVar("_T") _S = TypeVar("_S", bound=_Selectable) class IOLoop(Configurable): """An I/O event loop. As of Tornado 6.0, `IOLoop` is a wrapper around the `asyncio` event loop. Example usage for a simple TCP server: .. testcode:: import errno import functools import socket import tornado.ioloop from tornado.iostream import IOStream async def handle_connection(connection, address): stream = IOStream(connection) message = await stream.read_until_close() print("message from client:", message.decode().strip()) def connection_ready(sock, fd, events): while True: try: connection, address = sock.accept() except BlockingIOError: return connection.setblocking(0) io_loop = tornado.ioloop.IOLoop.current() io_loop.spawn_callback(handle_connection, connection, address) if __name__ == '__main__': sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) sock.bind(("", 8888)) sock.listen(128) io_loop = tornado.ioloop.IOLoop.current() callback = functools.partial(connection_ready, sock) io_loop.add_handler(sock.fileno(), callback, io_loop.READ) io_loop.start() .. testoutput:: :hide: By default, a newly-constructed `IOLoop` becomes the thread's current `IOLoop`, unless there already is a current `IOLoop`. This behavior can be controlled with the ``make_current`` argument to the `IOLoop` constructor: if ``make_current=True``, the new `IOLoop` will always try to become current and it raises an error if there is already a current instance. If ``make_current=False``, the new `IOLoop` will not try to become current. In general, an `IOLoop` cannot survive a fork or be shared across processes in any way. When multiple processes are being used, each process should create its own `IOLoop`, which also implies that any objects which depend on the `IOLoop` (such as `.AsyncHTTPClient`) must also be created in the child processes. As a guideline, anything that starts processes (including the `tornado.process` and `multiprocessing` modules) should do so as early as possible, ideally the first thing the application does after loading its configuration in ``main()``. .. versionchanged:: 4.2 Added the ``make_current`` keyword argument to the `IOLoop` constructor. .. versionchanged:: 5.0 Uses the `asyncio` event loop by default. The ``IOLoop.configure`` method cannot be used on Python 3 except to redundantly specify the `asyncio` event loop. """ # These constants were originally based on constants from the epoll module. 
NONE = 0 READ = 0x001 WRITE = 0x004 ERROR = 0x018 # In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops. _ioloop_for_asyncio = dict() # type: Dict[asyncio.AbstractEventLoop, IOLoop] @classmethod def configure( cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any ) -> None: if asyncio is not None: from tornado.platform.asyncio import BaseAsyncIOLoop if isinstance(impl, str): impl = import_object(impl) if isinstance(impl, type) and not issubclass(impl, BaseAsyncIOLoop): raise RuntimeError( "only AsyncIOLoop is allowed when asyncio is available" ) super(IOLoop, cls).configure(impl, **kwargs) @staticmethod def instance() -> "IOLoop": """Deprecated alias for `IOLoop.current()`. .. versionchanged:: 5.0 Previously, this method returned a global singleton `IOLoop`, in contrast with the per-thread `IOLoop` returned by `current()`. In nearly all cases the two were the same (when they differed, it was generally used from non-Tornado threads to communicate back to the main thread's `IOLoop`). This distinction is not present in `asyncio`, so in order to facilitate integration with that package `instance()` was changed to be an alias to `current()`. Applications using the cross-thread communications aspect of `instance()` should instead set their own global variable to point to the `IOLoop` they want to use. .. deprecated:: 5.0 """ return IOLoop.current() def install(self) -> None: """Deprecated alias for `make_current()`. .. versionchanged:: 5.0 Previously, this method would set this `IOLoop` as the global singleton used by `IOLoop.instance()`. Now that `instance()` is an alias for `current()`, `install()` is an alias for `make_current()`. .. deprecated:: 5.0 """ self.make_current() @staticmethod def clear_instance() -> None: """Deprecated alias for `clear_current()`. .. versionchanged:: 5.0 Previously, this method would clear the `IOLoop` used as the global singleton by `IOLoop.instance()`. Now that `instance()` is an alias for `current()`, `clear_instance()` is an alias for `clear_current()`. .. deprecated:: 5.0 """ IOLoop.clear_current() @typing.overload @staticmethod def current() -> "IOLoop": pass @typing.overload @staticmethod def current(instance: bool = True) -> Optional["IOLoop"]: # noqa: F811 pass @staticmethod def current(instance: bool = True) -> Optional["IOLoop"]: # noqa: F811 """Returns the current thread's `IOLoop`. If an `IOLoop` is currently running or has been marked as current by `make_current`, returns that instance. If there is no current `IOLoop` and ``instance`` is true, creates one. .. versionchanged:: 4.1 Added ``instance`` argument to control the fallback to `IOLoop.instance()`. .. versionchanged:: 5.0 On Python 3, control of the current `IOLoop` is delegated to `asyncio`, with this and other methods as pass-through accessors. The ``instance`` argument now controls whether an `IOLoop` is created automatically when there is none, instead of whether we fall back to `IOLoop.instance()` (which is now an alias for this method). ``instance=False`` is deprecated, since even if we do not create an `IOLoop`, this method may initialize the asyncio loop. 
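        A brief illustrative sketch::

            loop = IOLoop.current()  # get (or create) this thread's IOLoop
            loop.add_callback(lambda: print("running on the IOLoop"))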
""" try: loop = asyncio.get_event_loop() except (RuntimeError, AssertionError): if not instance: return None raise try: return IOLoop._ioloop_for_asyncio[loop] except KeyError: if instance: from tornado.platform.asyncio import AsyncIOMainLoop current = AsyncIOMainLoop(make_current=True) # type: Optional[IOLoop] else: current = None return current def make_current(self) -> None: """Makes this the `IOLoop` for the current thread. An `IOLoop` automatically becomes current for its thread when it is started, but it is sometimes useful to call `make_current` explicitly before starting the `IOLoop`, so that code run at startup time can find the right instance. .. versionchanged:: 4.1 An `IOLoop` created while there is no current `IOLoop` will automatically become current. .. versionchanged:: 5.0 This method also sets the current `asyncio` event loop. """ # The asyncio event loops override this method. raise NotImplementedError() @staticmethod def clear_current() -> None: """Clears the `IOLoop` for the current thread. Intended primarily for use by test frameworks in between tests. .. versionchanged:: 5.0 This method also clears the current `asyncio` event loop. """ old = IOLoop.current(instance=False) if old is not None: old._clear_current_hook() if asyncio is None: IOLoop._current.instance = None def _clear_current_hook(self) -> None: """Instance method called when an IOLoop ceases to be current. May be overridden by subclasses as a counterpart to make_current. """ pass @classmethod def configurable_base(cls) -> Type[Configurable]: return IOLoop @classmethod def configurable_default(cls) -> Type[Configurable]: from tornado.platform.asyncio import AsyncIOLoop return AsyncIOLoop def initialize(self, make_current: Optional[bool] = None) -> None: if make_current is None: if IOLoop.current(instance=False) is None: self.make_current() elif make_current: current = IOLoop.current(instance=False) # AsyncIO loops can already be current by this point. if current is not None and current is not self: raise RuntimeError("current IOLoop already exists") self.make_current() def close(self, all_fds: bool = False) -> None: """Closes the `IOLoop`, freeing any resources used. If ``all_fds`` is true, all file descriptors registered on the IOLoop will be closed (not just the ones created by the `IOLoop` itself). Many applications will only use a single `IOLoop` that runs for the entire lifetime of the process. In that case closing the `IOLoop` is not necessary since everything will be cleaned up when the process exits. `IOLoop.close` is provided mainly for scenarios such as unit tests, which create and destroy a large number of ``IOLoops``. An `IOLoop` must be completely stopped before it can be closed. This means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must be allowed to return before attempting to call `IOLoop.close()`. Therefore the call to `close` will usually appear just after the call to `start` rather than near the call to `stop`. .. versionchanged:: 3.1 If the `IOLoop` implementation supports non-integer objects for "file descriptors", those objects will have their ``close`` method when ``all_fds`` is true. 
""" raise NotImplementedError() @typing.overload def add_handler( self, fd: int, handler: Callable[[int, int], None], events: int ) -> None: pass @typing.overload # noqa: F811 def add_handler( self, fd: _S, handler: Callable[[_S, int], None], events: int ) -> None: pass def add_handler( # noqa: F811 self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int ) -> None: """Registers the given handler to receive the given events for ``fd``. The ``fd`` argument may either be an integer file descriptor or a file-like object with a ``fileno()`` and ``close()`` method. The ``events`` argument is a bitwise or of the constants ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. When an event occurs, ``handler(fd, events)`` will be run. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def update_handler(self, fd: Union[int, _Selectable], events: int) -> None: """Changes the events we listen for ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def remove_handler(self, fd: Union[int, _Selectable]) -> None: """Stop listening for events on ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def start(self) -> None: """Starts the I/O loop. The loop will run until one of the callbacks calls `stop()`, which will make the loop stop after the current event iteration completes. """ raise NotImplementedError() def _setup_logging(self) -> None: """The IOLoop catches and logs exceptions, so it's important that log output be visible. However, python's default behavior for non-root loggers (prior to python 3.2) is to print an unhelpful "no handlers could be found" message rather than the actual log entry, so we must explicitly configure logging if we've made it this far without anything. This method should be called from start() in subclasses. """ if not any( [ logging.getLogger().handlers, logging.getLogger("tornado").handlers, logging.getLogger("tornado.application").handlers, ] ): logging.basicConfig() def stop(self) -> None: """Stop the I/O loop. If the event loop is not currently running, the next call to `start()` will return immediately. Note that even after `stop` has been called, the `IOLoop` is not completely stopped until `IOLoop.start` has also returned. Some work that was scheduled before the call to `stop` may still be run before the `IOLoop` shuts down. """ raise NotImplementedError() def run_sync(self, func: Callable, timeout: Optional[float] = None) -> Any: """Starts the `IOLoop`, runs the given function, and stops the loop. The function must return either an awaitable object or ``None``. If the function returns an awaitable object, the `IOLoop` will run until the awaitable is resolved (and `run_sync()` will return the awaitable's result). If it raises an exception, the `IOLoop` will stop and the exception will be re-raised to the caller. The keyword-only argument ``timeout`` may be used to set a maximum duration for the function. If the timeout expires, a `tornado.util.TimeoutError` is raised. This method is useful to allow asynchronous calls in a ``main()`` function:: async def main(): # do stuff... if __name__ == '__main__': IOLoop.current().run_sync(main) .. versionchanged:: 4.3 Returning a non-``None``, non-awaitable value is now an error. .. 
versionchanged:: 5.0 If a timeout occurs, the ``func`` coroutine will be cancelled. """ future_cell = [None] # type: List[Optional[Future]] def run() -> None: try: result = func() if result is not None: from tornado.gen import convert_yielded result = convert_yielded(result) except Exception: fut = Future() # type: Future[Any] future_cell[0] = fut future_set_exc_info(fut, sys.exc_info()) else: if is_future(result): future_cell[0] = result else: fut = Future() future_cell[0] = fut fut.set_result(result) assert future_cell[0] is not None self.add_future(future_cell[0], lambda future: self.stop()) self.add_callback(run) if timeout is not None: def timeout_callback() -> None: # If we can cancel the future, do so and wait on it. If not, # Just stop the loop and return with the task still pending. # (If we neither cancel nor wait for the task, a warning # will be logged). assert future_cell[0] is not None if not future_cell[0].cancel(): self.stop() timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback) self.start() if timeout is not None: self.remove_timeout(timeout_handle) assert future_cell[0] is not None if future_cell[0].cancelled() or not future_cell[0].done(): raise TimeoutError("Operation timed out after %s seconds" % timeout) return future_cell[0].result() def time(self) -> float: """Returns the current time according to the `IOLoop`'s clock. The return value is a floating-point number relative to an unspecified time in the past. Historically, the IOLoop could be customized to use e.g. `time.monotonic` instead of `time.time`, but this is not currently supported and so this method is equivalent to `time.time`. """ return time.time() def add_timeout( self, deadline: Union[float, datetime.timedelta], callback: Callable[..., None], *args: Any, **kwargs: Any ) -> object: """Runs the ``callback`` at the time ``deadline`` from the I/O loop. Returns an opaque handle that may be passed to `remove_timeout` to cancel. ``deadline`` may be a number denoting a time (on the same scale as `IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. Since Tornado 4.0, `call_later` is a more convenient alternative for the relative case since it does not require a timedelta object. Note that it is not safe to call `add_timeout` from other threads. Instead, you must use `add_callback` to transfer control to the `IOLoop`'s thread, and then call `add_timeout` from there. Subclasses of IOLoop must implement either `add_timeout` or `call_at`; the default implementations of each will call the other. `call_at` is usually easier to implement, but subclasses that wish to maintain compatibility with Tornado versions prior to 4.0 must use `add_timeout` instead. .. versionchanged:: 4.0 Now passes through ``*args`` and ``**kwargs`` to the callback. """ if isinstance(deadline, numbers.Real): return self.call_at(deadline, callback, *args, **kwargs) elif isinstance(deadline, datetime.timedelta): return self.call_at( self.time() + deadline.total_seconds(), callback, *args, **kwargs ) else: raise TypeError("Unsupported deadline %r" % deadline) def call_later( self, delay: float, callback: Callable[..., None], *args: Any, **kwargs: Any ) -> object: """Runs the ``callback`` after ``delay`` seconds have passed. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. 
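        Cancellation therefore goes through the loop itself; a small
        illustrative sketch::

            loop = IOLoop.current()
            handle = loop.call_later(10.0, print, "fired")
            loop.remove_timeout(handle)  # not handle.cancel()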
See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.call_at(self.time() + delay, callback, *args, **kwargs) def call_at( self, when: float, callback: Callable[..., None], *args: Any, **kwargs: Any ) -> object: """Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.add_timeout(when, callback, *args, **kwargs) def remove_timeout(self, timeout: object) -> None: """Cancels a pending timeout. The argument is a handle as returned by `add_timeout`. It is safe to call `remove_timeout` even if the callback has already been run. """ raise NotImplementedError() def add_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None: """Calls the given callback on the next I/O loop iteration. It is safe to call this method from any thread at any time, except from a signal handler. Note that this is the **only** method in `IOLoop` that makes this thread-safety guarantee; all other interaction with the `IOLoop` must be done from that `IOLoop`'s thread. `add_callback()` may be used to transfer control from other threads to the `IOLoop`'s thread. To add a callback from a signal handler, see `add_callback_from_signal`. """ raise NotImplementedError() def add_callback_from_signal( self, callback: Callable, *args: Any, **kwargs: Any ) -> None: """Calls the given callback on the next I/O loop iteration. Safe for use from a Python signal handler; should not be used otherwise. """ raise NotImplementedError() def spawn_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None: """Calls the given callback on the next IOLoop iteration. As of Tornado 6.0, this method is equivalent to `add_callback`. .. versionadded:: 4.0 """ self.add_callback(callback, *args, **kwargs) def add_future( self, future: "Union[Future[_T], concurrent.futures.Future[_T]]", callback: Callable[["Future[_T]"], None], ) -> None: """Schedules a callback on the ``IOLoop`` when the given `.Future` is finished. The callback is invoked with one argument, the `.Future`. This method only accepts `.Future` objects and not other awaitables (unlike most of Tornado where the two are interchangeable). """ if isinstance(future, Future): # Note that we specifically do not want the inline behavior of # tornado.concurrent.future_add_done_callback. We always want # this callback scheduled on the next IOLoop iteration (which # asyncio.Future always does). # # Wrap the callback in self._run_callback so we control # the error logging (i.e. it goes to tornado.log.app_log # instead of asyncio's log). future.add_done_callback( lambda f: self._run_callback(functools.partial(callback, future)) ) else: assert is_future(future) # For concurrent futures, we use self.add_callback, so # it's fine if future_add_done_callback inlines that call. future_add_done_callback( future, lambda f: self.add_callback(callback, future) ) def run_in_executor( self, executor: Optional[concurrent.futures.Executor], func: Callable[..., _T], *args: Any ) -> Awaitable[_T]: """Runs a function in a ``concurrent.futures.Executor``. If ``executor`` is ``None``, the IO loop's default executor will be used. 
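        An illustrative sketch (``blocking_io`` is a hypothetical blocking
        function)::

            async def caller():
                loop = IOLoop.current()
                result = await loop.run_in_executor(None, blocking_io, "arg")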
Use `functools.partial` to pass keyword arguments to ``func``. .. versionadded:: 5.0 """ if executor is None: if not hasattr(self, "_executor"): from tornado.process import cpu_count self._executor = concurrent.futures.ThreadPoolExecutor( max_workers=(cpu_count() * 5) ) # type: concurrent.futures.Executor executor = self._executor c_future = executor.submit(func, *args) # Concurrent Futures are not usable with await. Wrap this in a # Tornado Future instead, using self.add_future for thread-safety. t_future = Future() # type: Future[_T] self.add_future(c_future, lambda f: chain_future(f, t_future)) return t_future def set_default_executor(self, executor: concurrent.futures.Executor) -> None: """Sets the default executor to use with :meth:`run_in_executor`. .. versionadded:: 5.0 """ self._executor = executor def _run_callback(self, callback: Callable[[], Any]) -> None: """Runs a callback with error handling. .. versionchanged:: 6.0 CancelledErrors are no longer logged. """ try: ret = callback() if ret is not None: from tornado import gen # Functions that return Futures typically swallow all # exceptions and store them in the Future. If a Future # makes it out to the IOLoop, ensure its exception (if any) # gets logged too. try: ret = gen.convert_yielded(ret) except gen.BadYieldError: # It's not unusual for add_callback to be used with # methods returning a non-None and non-yieldable # result, which should just be ignored. pass else: self.add_future(ret, self._discard_future_result) except asyncio.CancelledError: pass except Exception: app_log.error("Exception in callback %r", callback, exc_info=True) def _discard_future_result(self, future: Future) -> None: """Avoid unhandled-exception warnings from spawned coroutines.""" future.result() def split_fd( self, fd: Union[int, _Selectable] ) -> Tuple[int, Union[int, _Selectable]]: # """Returns an (fd, obj) pair from an ``fd`` parameter. # We accept both raw file descriptors and file-like objects as # input to `add_handler` and related methods. When a file-like # object is passed, we must retain the object itself so we can # close it correctly when the `IOLoop` shuts down, but the # poller interfaces favor file descriptors (they will accept # file-like objects and call ``fileno()`` for you, but they # always return the descriptor itself). # This method is provided for use by `IOLoop` subclasses and should # not generally be used by application code. # .. versionadded:: 4.0 # """ if isinstance(fd, int): return fd, fd return fd.fileno(), fd def close_fd(self, fd: Union[int, _Selectable]) -> None: # """Utility method to close an ``fd``. # If ``fd`` is a file-like object, we close it directly; otherwise # we use `os.close`. # This method is provided for use by `IOLoop` subclasses (in # implementations of ``IOLoop.close(all_fds=True)`` and should # not generally be used by application code. # .. 
versionadded:: 4.0 # """ try: if isinstance(fd, int): os.close(fd) else: fd.close() except OSError: pass class _Timeout(object): """An IOLoop timeout, a UNIX timestamp and a callback""" # Reduce memory overhead when there are lots of pending callbacks __slots__ = ["deadline", "callback", "tdeadline"] def __init__( self, deadline: float, callback: Callable[[], None], io_loop: IOLoop ) -> None: if not isinstance(deadline, numbers.Real): raise TypeError("Unsupported deadline %r" % deadline) self.deadline = deadline self.callback = callback self.tdeadline = ( deadline, next(io_loop._timeout_counter), ) # type: Tuple[float, int] # Comparison methods to sort by deadline, with object id as a tiebreaker # to guarantee a consistent ordering. The heapq module uses __le__ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons # use __lt__). def __lt__(self, other: "_Timeout") -> bool: return self.tdeadline < other.tdeadline def __le__(self, other: "_Timeout") -> bool: return self.tdeadline <= other.tdeadline class PeriodicCallback(object): """Schedules the given callback to be called periodically. The callback is called every ``callback_time`` milliseconds. Note that the timeout is given in milliseconds, while most other time-related functions in Tornado use seconds. If ``jitter`` is specified, each callback time will be randomly selected within a window of ``jitter * callback_time`` milliseconds. Jitter can be used to reduce alignment of events with similar periods. A jitter of 0.1 means allowing a 10% variation in callback time. The window is centered on ``callback_time`` so the total number of calls within a given interval should not be significantly affected by adding jitter. If the callback runs for longer than ``callback_time`` milliseconds, subsequent invocations will be skipped to get back on schedule. `start` must be called after the `PeriodicCallback` is created. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. versionchanged:: 5.1 The ``jitter`` argument is added. """ def __init__( self, callback: Callable[[], None], callback_time: float, jitter: float = 0 ) -> None: self.callback = callback if callback_time <= 0: raise ValueError("Periodic callback must have a positive callback_time") self.callback_time = callback_time self.jitter = jitter self._running = False self._timeout = None # type: object def start(self) -> None: """Starts the timer.""" # Looking up the IOLoop here allows to first instantiate the # PeriodicCallback in another thread, then start it using # IOLoop.add_callback(). self.io_loop = IOLoop.current() self._running = True self._next_timeout = self.io_loop.time() self._schedule_next() def stop(self) -> None: """Stops the timer.""" self._running = False if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None def is_running(self) -> bool: """Returns ``True`` if this `.PeriodicCallback` has been started. .. 
versionadded:: 4.1 """ return self._running def _run(self) -> None: if not self._running: return try: return self.callback() except Exception: app_log.error("Exception in callback %r", self.callback, exc_info=True) finally: self._schedule_next() def _schedule_next(self) -> None: if self._running: self._update_next(self.io_loop.time()) self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) def _update_next(self, current_time: float) -> None: callback_time_sec = self.callback_time / 1000.0 if self.jitter: # apply jitter fraction callback_time_sec *= 1 + (self.jitter * (random.random() - 0.5)) if self._next_timeout <= current_time: # The period should be measured from the start of one call # to the start of the next. If one call takes too long, # skip cycles to get back to a multiple of the original # schedule. self._next_timeout += ( math.floor((current_time - self._next_timeout) / callback_time_sec) + 1 ) * callback_time_sec else: # If the clock moved backwards, ensure we advance the next # timeout instead of recomputing the same value again. # This may result in long gaps between callbacks if the # clock jumps backwards by a lot, but the far more common # scenario is a small NTP adjustment that should just be # ignored. # # Note that on some systems if time.time() runs slower # than time.monotonic() (most common on windows), we # effectively experience a small backwards time jump on # every iteration because PeriodicCallback uses # time.time() while asyncio schedules callbacks using # time.monotonic(). # https://github.com/tornadoweb/tornado/issues/2333 self._next_timeout += callback_time_sec tornado-6.1.0/tornado/iostream.py000066400000000000000000001776531374705040500170570ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility classes to write to and read from non-blocking files and sockets. Contents: * `BaseIOStream`: Generic interface for reading and writing. * `IOStream`: Implementation of BaseIOStream using non-blocking sockets. * `SSLIOStream`: SSL-aware version of IOStream. * `PipeIOStream`: Pipe-based IOStream implementation. """ import asyncio import collections import errno import io import numbers import os import socket import ssl import sys import re from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado import ioloop from tornado.log import gen_log from tornado.netutil import ssl_wrap_socket, _client_ssl_defaults, _server_ssl_defaults from tornado.util import errno_from_exception import typing from typing import ( Union, Optional, Awaitable, Callable, Pattern, Any, Dict, TypeVar, Tuple, ) from types import TracebackType if typing.TYPE_CHECKING: from typing import Deque, List, Type # noqa: F401 _IOStreamType = TypeVar("_IOStreamType", bound="IOStream") # These errnos indicate that a connection has been abruptly terminated. # They should be caught and handled less noisily than other errors. 
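# For example, a peer disconnecting mid-stream typically surfaces as
# ECONNRESET or EPIPE; IOStream treats these as a normal close instead of
# logging them as unexpected errors.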
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE, errno.ETIMEDOUT) if hasattr(errno, "WSAECONNRESET"): _ERRNO_CONNRESET += ( # type: ignore errno.WSAECONNRESET, # type: ignore errno.WSAECONNABORTED, # type: ignore errno.WSAETIMEDOUT, # type: ignore ) if sys.platform == "darwin": # OSX appears to have a race condition that causes send(2) to return # EPROTOTYPE if called while a socket is being torn down: # http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ # Since the socket is being closed anyway, treat this as an ECONNRESET # instead of an unexpected error. _ERRNO_CONNRESET += (errno.EPROTOTYPE,) # type: ignore _WINDOWS = sys.platform.startswith("win") class StreamClosedError(IOError): """Exception raised by `IOStream` methods when the stream is closed. Note that the close callback is scheduled to run *after* other callbacks on the stream (to allow for buffered data to be processed), so you may see this error before you see the close callback. The ``real_error`` attribute contains the underlying error that caused the stream to close (if any). .. versionchanged:: 4.3 Added the ``real_error`` attribute. """ def __init__(self, real_error: Optional[BaseException] = None) -> None: super().__init__("Stream is closed") self.real_error = real_error class UnsatisfiableReadError(Exception): """Exception raised when a read cannot be satisfied. Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes`` argument. """ pass class StreamBufferFullError(Exception): """Exception raised by `IOStream` methods when the buffer is full. """ class _StreamBuffer(object): """ A specialized buffer that tries to avoid copies when large pieces of data are encountered. """ def __init__(self) -> None: # A sequence of (False, bytearray) and (True, memoryview) objects self._buffers = ( collections.deque() ) # type: Deque[Tuple[bool, Union[bytearray, memoryview]]] # Position in the first buffer self._first_pos = 0 self._size = 0 def __len__(self) -> int: return self._size # Data above this size will be appended separately instead # of extending an existing bytearray _large_buf_threshold = 2048 def append(self, data: Union[bytes, bytearray, memoryview]) -> None: """ Append the given piece of data (should be a buffer-compatible object). """ size = len(data) if size > self._large_buf_threshold: if not isinstance(data, memoryview): data = memoryview(data) self._buffers.append((True, data)) elif size > 0: if self._buffers: is_memview, b = self._buffers[-1] new_buf = is_memview or len(b) >= self._large_buf_threshold else: new_buf = True if new_buf: self._buffers.append((False, bytearray(data))) else: b += data # type: ignore self._size += size def peek(self, size: int) -> memoryview: """ Get a view over at most ``size`` bytes (possibly fewer) at the current buffer position. """ assert size > 0 try: is_memview, b = self._buffers[0] except IndexError: return memoryview(b"") pos = self._first_pos if is_memview: return typing.cast(memoryview, b[pos : pos + size]) else: return memoryview(b)[pos : pos + size] def advance(self, size: int) -> None: """ Advance the current buffer position by ``size`` bytes. 
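        A tiny sketch of the append/peek/advance protocol::

            buf = _StreamBuffer()
            buf.append(b"hello world")
            view = buf.peek(5)  # memoryview over b"hello"
            buf.advance(5)      # consume those bytes; len(buf) is now 6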
""" assert 0 < size <= self._size self._size -= size pos = self._first_pos buffers = self._buffers while buffers and size > 0: is_large, b = buffers[0] b_remain = len(b) - size - pos if b_remain <= 0: buffers.popleft() size -= len(b) - pos pos = 0 elif is_large: pos += size size = 0 else: # Amortized O(1) shrink for Python 2 pos += size if len(b) <= 2 * pos: del typing.cast(bytearray, b)[:pos] pos = 0 size = 0 assert size == 0 self._first_pos = pos class BaseIOStream(object): """A utility class to write to and read from a non-blocking file or socket. We support a non-blocking ``write()`` and a family of ``read_*()`` methods. When the operation completes, the ``Awaitable`` will resolve with the data read (or ``None`` for ``write()``). All outstanding ``Awaitables`` will resolve with a `StreamClosedError` when the stream is closed; `.BaseIOStream.set_close_callback` can also be used to be notified of a closed stream. When a stream is closed due to an error, the IOStream's ``error`` attribute contains the exception object. Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, `read_from_fd`, and optionally `get_fd_error`. """ def __init__( self, max_buffer_size: Optional[int] = None, read_chunk_size: Optional[int] = None, max_write_buffer_size: Optional[int] = None, ) -> None: """`BaseIOStream` constructor. :arg max_buffer_size: Maximum amount of incoming data to buffer; defaults to 100MB. :arg read_chunk_size: Amount of data to read at one time from the underlying transport; defaults to 64KB. :arg max_write_buffer_size: Amount of outgoing data to buffer; defaults to unlimited. .. versionchanged:: 4.0 Add the ``max_write_buffer_size`` parameter. Changed default ``read_chunk_size`` to 64KB. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ self.io_loop = ioloop.IOLoop.current() self.max_buffer_size = max_buffer_size or 104857600 # A chunk size that is too close to max_buffer_size can cause # spurious failures. self.read_chunk_size = min(read_chunk_size or 65536, self.max_buffer_size // 2) self.max_write_buffer_size = max_write_buffer_size self.error = None # type: Optional[BaseException] self._read_buffer = bytearray() self._read_buffer_pos = 0 self._read_buffer_size = 0 self._user_read_buffer = False self._after_user_read_buffer = None # type: Optional[bytearray] self._write_buffer = _StreamBuffer() self._total_write_index = 0 self._total_write_done_index = 0 self._read_delimiter = None # type: Optional[bytes] self._read_regex = None # type: Optional[Pattern] self._read_max_bytes = None # type: Optional[int] self._read_bytes = None # type: Optional[int] self._read_partial = False self._read_until_close = False self._read_future = None # type: Optional[Future] self._write_futures = ( collections.deque() ) # type: Deque[Tuple[int, Future[None]]] self._close_callback = None # type: Optional[Callable[[], None]] self._connect_future = None # type: Optional[Future[IOStream]] # _ssl_connect_future should be defined in SSLIOStream # but it's here so we can clean it up in _signal_closed # TODO: refactor that so subclasses can add additional futures # to be cancelled. self._ssl_connect_future = None # type: Optional[Future[SSLIOStream]] self._connecting = False self._state = None # type: Optional[int] self._closed = False def fileno(self) -> Union[int, ioloop._Selectable]: """Returns the file descriptor for this stream.""" raise NotImplementedError() def close_fd(self) -> None: """Closes the file underlying this stream. 
``close_fd`` is called by `BaseIOStream` and should not be called elsewhere; other users should call `close` instead. """ raise NotImplementedError() def write_to_fd(self, data: memoryview) -> int: """Attempts to write ``data`` to the underlying file. Returns the number of bytes written. """ raise NotImplementedError() def read_from_fd(self, buf: Union[bytearray, memoryview]) -> Optional[int]: """Attempts to read from the underlying file. Reads up to ``len(buf)`` bytes, storing them in the buffer. Returns the number of bytes read. Returns None if there was nothing to read (the socket returned `~errno.EWOULDBLOCK` or equivalent), and zero on EOF. .. versionchanged:: 5.0 Interface redesigned to take a buffer and return a number of bytes instead of a freshly-allocated object. """ raise NotImplementedError() def get_fd_error(self) -> Optional[Exception]: """Returns information about any error on the underlying file. This method is called after the `.IOLoop` has signaled an error on the file descriptor, and should return an Exception (such as `socket.error` with additional information, or None if no such information is available. """ return None def read_until_regex( self, regex: bytes, max_bytes: Optional[int] = None ) -> Awaitable[bytes]: """Asynchronously read until we have matched the given regex. The result includes the data that matches the regex and anything that came before it. If ``max_bytes`` is not None, the connection will be closed if more than ``max_bytes`` bytes have been read and the regex is not satisfied. .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. """ future = self._start_read() self._read_regex = re.compile(regex) self._read_max_bytes = max_bytes try: self._try_inline_read() except UnsatisfiableReadError as e: # Handle this the same way as in _handle_events. gen_log.info("Unsatisfiable read, closing connection: %s" % e) self.close(exc_info=e) return future except: # Ensure that the future doesn't log an error because its # failure was never examined. future.add_done_callback(lambda f: f.exception()) raise return future def read_until( self, delimiter: bytes, max_bytes: Optional[int] = None ) -> Awaitable[bytes]: """Asynchronously read until we have found the given delimiter. The result includes all the data read including the delimiter. If ``max_bytes`` is not None, the connection will be closed if more than ``max_bytes`` bytes have been read and the delimiter is not found. .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. """ future = self._start_read() self._read_delimiter = delimiter self._read_max_bytes = max_bytes try: self._try_inline_read() except UnsatisfiableReadError as e: # Handle this the same way as in _handle_events. gen_log.info("Unsatisfiable read, closing connection: %s" % e) self.close(exc_info=e) return future except: future.add_done_callback(lambda f: f.exception()) raise return future def read_bytes(self, num_bytes: int, partial: bool = False) -> Awaitable[bytes]: """Asynchronously read a number of bytes. If ``partial`` is true, data is returned as soon as we have any bytes to return (but never more than ``num_bytes``) .. 
versionchanged:: 4.0 Added the ``partial`` argument. The callback argument is now optional and a `.Future` will be returned if it is omitted. .. versionchanged:: 6.0 The ``callback`` and ``streaming_callback`` arguments have been removed. Use the returned `.Future` (and ``partial=True`` for ``streaming_callback``) instead. """ future = self._start_read() assert isinstance(num_bytes, numbers.Integral) self._read_bytes = num_bytes self._read_partial = partial try: self._try_inline_read() except: future.add_done_callback(lambda f: f.exception()) raise return future def read_into(self, buf: bytearray, partial: bool = False) -> Awaitable[int]: """Asynchronously read a number of bytes. ``buf`` must be a writable buffer into which data will be read. If ``partial`` is true, the callback is run as soon as any bytes have been read. Otherwise, it is run when the ``buf`` has been entirely filled with read data. .. versionadded:: 5.0 .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. """ future = self._start_read() # First copy data already in read buffer available_bytes = self._read_buffer_size n = len(buf) if available_bytes >= n: end = self._read_buffer_pos + n buf[:] = memoryview(self._read_buffer)[self._read_buffer_pos : end] del self._read_buffer[:end] self._after_user_read_buffer = self._read_buffer elif available_bytes > 0: buf[:available_bytes] = memoryview(self._read_buffer)[ self._read_buffer_pos : ] # Set up the supplied buffer as our temporary read buffer. # The original (if it had any data remaining) has been # saved for later. self._user_read_buffer = True self._read_buffer = buf self._read_buffer_pos = 0 self._read_buffer_size = available_bytes self._read_bytes = n self._read_partial = partial try: self._try_inline_read() except: future.add_done_callback(lambda f: f.exception()) raise return future def read_until_close(self) -> Awaitable[bytes]: """Asynchronously reads all data from the socket until it is closed. This will buffer all available data until ``max_buffer_size`` is reached. If flow control or cancellation are desired, use a loop with `read_bytes(partial=True) <.read_bytes>` instead. .. versionchanged:: 4.0 The callback argument is now optional and a `.Future` will be returned if it is omitted. .. versionchanged:: 6.0 The ``callback`` and ``streaming_callback`` arguments have been removed. Use the returned `.Future` (and `read_bytes` with ``partial=True`` for ``streaming_callback``) instead. """ future = self._start_read() if self.closed(): self._finish_read(self._read_buffer_size, False) return future self._read_until_close = True try: self._try_inline_read() except: future.add_done_callback(lambda f: f.exception()) raise return future def write(self, data: Union[bytes, memoryview]) -> "Future[None]": """Asynchronously write the given data to this stream. This method returns a `.Future` that resolves (with a result of ``None``) when the write has been completed. The ``data`` argument may be of type `bytes` or `memoryview`. .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. .. versionchanged:: 4.5 Added support for `memoryview` arguments. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. 
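        A short usage sketch (``stream`` is a connected `IOStream`)::

            await stream.write(b"ping")
            data = await stream.read_bytes(4)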
""" self._check_closed() if data: if ( self.max_write_buffer_size is not None and len(self._write_buffer) + len(data) > self.max_write_buffer_size ): raise StreamBufferFullError("Reached maximum write buffer size") self._write_buffer.append(data) self._total_write_index += len(data) future = Future() # type: Future[None] future.add_done_callback(lambda f: f.exception()) self._write_futures.append((self._total_write_index, future)) if not self._connecting: self._handle_write() if self._write_buffer: self._add_io_state(self.io_loop.WRITE) self._maybe_add_error_listener() return future def set_close_callback(self, callback: Optional[Callable[[], None]]) -> None: """Call the given callback when the stream is closed. This mostly is not necessary for applications that use the `.Future` interface; all outstanding ``Futures`` will resolve with a `StreamClosedError` when the stream is closed. However, it is still useful as a way to signal that the stream has been closed while no other read or write is in progress. Unlike other callback-based interfaces, ``set_close_callback`` was not removed in Tornado 6.0. """ self._close_callback = callback self._maybe_add_error_listener() def close( self, exc_info: Union[ None, bool, BaseException, Tuple[ "Optional[Type[BaseException]]", Optional[BaseException], Optional[TracebackType], ], ] = False, ) -> None: """Close this stream. If ``exc_info`` is true, set the ``error`` attribute to the current exception from `sys.exc_info` (or if ``exc_info`` is a tuple, use that instead of `sys.exc_info`). """ if not self.closed(): if exc_info: if isinstance(exc_info, tuple): self.error = exc_info[1] elif isinstance(exc_info, BaseException): self.error = exc_info else: exc_info = sys.exc_info() if any(exc_info): self.error = exc_info[1] if self._read_until_close: self._read_until_close = False self._finish_read(self._read_buffer_size, False) elif self._read_future is not None: # resolve reads that are pending and ready to complete try: pos = self._find_read_pos() except UnsatisfiableReadError: pass else: if pos is not None: self._read_from_buffer(pos) if self._state is not None: self.io_loop.remove_handler(self.fileno()) self._state = None self.close_fd() self._closed = True self._signal_closed() def _signal_closed(self) -> None: futures = [] # type: List[Future] if self._read_future is not None: futures.append(self._read_future) self._read_future = None futures += [future for _, future in self._write_futures] self._write_futures.clear() if self._connect_future is not None: futures.append(self._connect_future) self._connect_future = None for future in futures: if not future.done(): future.set_exception(StreamClosedError(real_error=self.error)) # Reference the exception to silence warnings. Annoyingly, # this raises if the future was cancelled, but just # returns any other error. try: future.exception() except asyncio.CancelledError: pass if self._ssl_connect_future is not None: # _ssl_connect_future expects to see the real exception (typically # an ssl.SSLError), not just StreamClosedError. 
if not self._ssl_connect_future.done(): if self.error is not None: self._ssl_connect_future.set_exception(self.error) else: self._ssl_connect_future.set_exception(StreamClosedError()) self._ssl_connect_future.exception() self._ssl_connect_future = None if self._close_callback is not None: cb = self._close_callback self._close_callback = None self.io_loop.add_callback(cb) # Clear the buffers so they can be cleared immediately even # if the IOStream object is kept alive by a reference cycle. # TODO: Clear the read buffer too; it currently breaks some tests. self._write_buffer = None # type: ignore def reading(self) -> bool: """Returns ``True`` if we are currently reading from the stream.""" return self._read_future is not None def writing(self) -> bool: """Returns ``True`` if we are currently writing to the stream.""" return bool(self._write_buffer) def closed(self) -> bool: """Returns ``True`` if the stream has been closed.""" return self._closed def set_nodelay(self, value: bool) -> None: """Sets the no-delay flag for this stream. By default, data written to TCP streams may be held for a time to make the most efficient use of bandwidth (according to Nagle's algorithm). The no-delay flag requests that data be written as soon as possible, even if doing so would consume additional bandwidth. This flag is currently defined only for TCP-based ``IOStreams``. .. versionadded:: 3.1 """ pass def _handle_connect(self) -> None: raise NotImplementedError() def _handle_events(self, fd: Union[int, ioloop._Selectable], events: int) -> None: if self.closed(): gen_log.warning("Got events for closed stream %s", fd) return try: if self._connecting: # Most IOLoops will report a write failed connect # with the WRITE event, but SelectIOLoop reports a # READ as well so we must check for connecting before # either. self._handle_connect() if self.closed(): return if events & self.io_loop.READ: self._handle_read() if self.closed(): return if events & self.io_loop.WRITE: self._handle_write() if self.closed(): return if events & self.io_loop.ERROR: self.error = self.get_fd_error() # We may have queued up a user callback in _handle_read or # _handle_write, so don't close the IOStream until those # callbacks have had a chance to run. self.io_loop.add_callback(self.close) return state = self.io_loop.ERROR if self.reading(): state |= self.io_loop.READ if self.writing(): state |= self.io_loop.WRITE if state == self.io_loop.ERROR and self._read_buffer_size == 0: # If the connection is idle, listen for reads too so # we can tell if the connection is closed. If there is # data in the read buffer we won't run the close callback # yet anyway, so we don't need to listen in this case. state |= self.io_loop.READ if state != self._state: assert ( self._state is not None ), "shouldn't happen: _handle_events without self._state" self._state = state self.io_loop.update_handler(self.fileno(), self._state) except UnsatisfiableReadError as e: gen_log.info("Unsatisfiable read, closing connection: %s" % e) self.close(exc_info=e) except Exception as e: gen_log.error("Uncaught exception, closing connection.", exc_info=True) self.close(exc_info=e) raise def _read_to_buffer_loop(self) -> Optional[int]: # This method is called from _handle_read and _try_inline_read. 
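        # First decide how much data the pending read needs: a byte count
        # for read_bytes, max_bytes for bounded delimiter/regex reads, or
        # no fixed target (None) when we must scan for the delimiter.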
if self._read_bytes is not None: target_bytes = self._read_bytes # type: Optional[int] elif self._read_max_bytes is not None: target_bytes = self._read_max_bytes elif self.reading(): # For read_until without max_bytes, or # read_until_close, read as much as we can before # scanning for the delimiter. target_bytes = None else: target_bytes = 0 next_find_pos = 0 while not self.closed(): # Read from the socket until we get EWOULDBLOCK or equivalent. # SSL sockets do some internal buffering, and if the data is # sitting in the SSL object's buffer select() and friends # can't see it; the only way to find out if it's there is to # try to read it. if self._read_to_buffer() == 0: break # If we've read all the bytes we can use, break out of # this loop. # If we've reached target_bytes, we know we're done. if target_bytes is not None and self._read_buffer_size >= target_bytes: break # Otherwise, we need to call the more expensive find_read_pos. # It's inefficient to do this on every read, so instead # do it on the first read and whenever the read buffer # size has doubled. if self._read_buffer_size >= next_find_pos: pos = self._find_read_pos() if pos is not None: return pos next_find_pos = self._read_buffer_size * 2 return self._find_read_pos() def _handle_read(self) -> None: try: pos = self._read_to_buffer_loop() except UnsatisfiableReadError: raise except asyncio.CancelledError: raise except Exception as e: gen_log.warning("error on read: %s" % e) self.close(exc_info=e) return if pos is not None: self._read_from_buffer(pos) def _start_read(self) -> Future: if self._read_future is not None: # It is an error to start a read while a prior read is unresolved. # However, if the prior read is unresolved because the stream was # closed without satisfying it, it's better to raise # StreamClosedError instead of AssertionError. In particular, this # situation occurs in harmless situations in http1connection.py and # an AssertionError would be logged noisily. # # On the other hand, it is legal to start a new read while the # stream is closed, in case the read can be satisfied from the # read buffer. So we only want to check the closed status of the # stream if we need to decide what kind of error to raise for # "already reading". # # These conditions have proven difficult to test; we have no # unittests that reliably verify this behavior so be careful # when making changes here. See #2651 and #2719. self._check_closed() assert self._read_future is None, "Already reading" self._read_future = Future() return self._read_future def _finish_read(self, size: int, streaming: bool) -> None: if self._user_read_buffer: self._read_buffer = self._after_user_read_buffer or bytearray() self._after_user_read_buffer = None self._read_buffer_pos = 0 self._read_buffer_size = len(self._read_buffer) self._user_read_buffer = False result = size # type: Union[int, bytes] else: result = self._consume(size) if self._read_future is not None: future = self._read_future self._read_future = None future_set_result_unless_cancelled(future, result) self._maybe_add_error_listener() def _try_inline_read(self) -> None: """Attempt to complete the current read operation from buffered data. If the read can be completed without blocking, schedules the read callback on the next IOLoop iteration; otherwise starts listening for reads on the socket. 
""" # See if we've already got the data from a previous read pos = self._find_read_pos() if pos is not None: self._read_from_buffer(pos) return self._check_closed() pos = self._read_to_buffer_loop() if pos is not None: self._read_from_buffer(pos) return # We couldn't satisfy the read inline, so make sure we're # listening for new data unless the stream is closed. if not self.closed(): self._add_io_state(ioloop.IOLoop.READ) def _read_to_buffer(self) -> Optional[int]: """Reads from the socket and appends the result to the read buffer. Returns the number of bytes read. Returns 0 if there is nothing to read (i.e. the read returns EWOULDBLOCK or equivalent). On error closes the socket and raises an exception. """ try: while True: try: if self._user_read_buffer: buf = memoryview(self._read_buffer)[ self._read_buffer_size : ] # type: Union[memoryview, bytearray] else: buf = bytearray(self.read_chunk_size) bytes_read = self.read_from_fd(buf) except (socket.error, IOError, OSError) as e: # ssl.SSLError is a subclass of socket.error if self._is_connreset(e): # Treat ECONNRESET as a connection close rather than # an error to minimize log spam (the exception will # be available on self.error for apps that care). self.close(exc_info=e) return None self.close(exc_info=e) raise break if bytes_read is None: return 0 elif bytes_read == 0: self.close() return 0 if not self._user_read_buffer: self._read_buffer += memoryview(buf)[:bytes_read] self._read_buffer_size += bytes_read finally: # Break the reference to buf so we don't waste a chunk's worth of # memory in case an exception hangs on to our stack frame. del buf if self._read_buffer_size > self.max_buffer_size: gen_log.error("Reached maximum read buffer size") self.close() raise StreamBufferFullError("Reached maximum read buffer size") return bytes_read def _read_from_buffer(self, pos: int) -> None: """Attempts to complete the currently-pending read from the buffer. The argument is either a position in the read buffer or None, as returned by _find_read_pos. """ self._read_bytes = self._read_delimiter = self._read_regex = None self._read_partial = False self._finish_read(pos, False) def _find_read_pos(self) -> Optional[int]: """Attempts to find a position in the read buffer that satisfies the currently-pending read. Returns a position in the buffer if the current read can be satisfied, or None if it cannot. """ if self._read_bytes is not None and ( self._read_buffer_size >= self._read_bytes or (self._read_partial and self._read_buffer_size > 0) ): num_bytes = min(self._read_bytes, self._read_buffer_size) return num_bytes elif self._read_delimiter is not None: # Multi-byte delimiters (e.g. '\r\n') may straddle two # chunks in the read buffer, so we can't easily find them # without collapsing the buffer. However, since protocols # using delimited reads (as opposed to reads of a known # length) tend to be "line" oriented, the delimiter is likely # to be in the first few chunks. Merge the buffer gradually # since large merges are relatively expensive and get undone in # _consume(). 
if self._read_buffer: loc = self._read_buffer.find( self._read_delimiter, self._read_buffer_pos ) if loc != -1: loc -= self._read_buffer_pos delimiter_len = len(self._read_delimiter) self._check_max_bytes(self._read_delimiter, loc + delimiter_len) return loc + delimiter_len self._check_max_bytes(self._read_delimiter, self._read_buffer_size) elif self._read_regex is not None: if self._read_buffer: m = self._read_regex.search(self._read_buffer, self._read_buffer_pos) if m is not None: loc = m.end() - self._read_buffer_pos self._check_max_bytes(self._read_regex, loc) return loc self._check_max_bytes(self._read_regex, self._read_buffer_size) return None def _check_max_bytes(self, delimiter: Union[bytes, Pattern], size: int) -> None: if self._read_max_bytes is not None and size > self._read_max_bytes: raise UnsatisfiableReadError( "delimiter %r not found within %d bytes" % (delimiter, self._read_max_bytes) ) def _handle_write(self) -> None: while True: size = len(self._write_buffer) if not size: break assert size > 0 try: if _WINDOWS: # On windows, socket.send blows up if given a # write buffer that's too large, instead of just # returning the number of bytes it was able to # process. Therefore we must not call socket.send # with more than 128KB at a time. size = 128 * 1024 num_bytes = self.write_to_fd(self._write_buffer.peek(size)) if num_bytes == 0: break self._write_buffer.advance(num_bytes) self._total_write_done_index += num_bytes except BlockingIOError: break except (socket.error, IOError, OSError) as e: if not self._is_connreset(e): # Broken pipe errors are usually caused by connection # reset, and its better to not log EPIPE errors to # minimize log spam gen_log.warning("Write error on %s: %s", self.fileno(), e) self.close(exc_info=e) return while self._write_futures: index, future = self._write_futures[0] if index > self._total_write_done_index: break self._write_futures.popleft() future_set_result_unless_cancelled(future, None) def _consume(self, loc: int) -> bytes: # Consume loc bytes from the read buffer and return them if loc == 0: return b"" assert loc <= self._read_buffer_size # Slice the bytearray buffer into bytes, without intermediate copying b = ( memoryview(self._read_buffer)[ self._read_buffer_pos : self._read_buffer_pos + loc ] ).tobytes() self._read_buffer_pos += loc self._read_buffer_size -= loc # Amortized O(1) shrink # (this heuristic is implemented natively in Python 3.4+ # but is replicated here for Python 2) if self._read_buffer_pos > self._read_buffer_size: del self._read_buffer[: self._read_buffer_pos] self._read_buffer_pos = 0 return b def _check_closed(self) -> None: if self.closed(): raise StreamClosedError(real_error=self.error) def _maybe_add_error_listener(self) -> None: # This method is part of an optimization: to detect a connection that # is closed when we're not actively reading or writing, we must listen # for read events. However, it is inefficient to do this when the # connection is first established because we are going to read or write # immediately anyway. Instead, we insert checks at various times to # see if the connection is idle and add the read listener then. if self._state is None or self._state == ioloop.IOLoop.ERROR: if ( not self.closed() and self._read_buffer_size == 0 and self._close_callback is not None ): self._add_io_state(ioloop.IOLoop.READ) def _add_io_state(self, state: int) -> None: """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler. Implementation notes: Reads and writes have a fast path and a slow path. 
The fast path reads synchronously from socket buffers, while the slow path uses `_add_io_state` to schedule an IOLoop callback. To detect closed connections, we must have called `_add_io_state` at some point, but we want to delay this as much as possible so we don't have to set an `IOLoop.ERROR` listener that will be overwritten by the next slow-path operation. If a sequence of fast-path ops do not end in a slow-path op, (e.g. for an @asynchronous long-poll request), we must add the error handler. TODO: reevaluate this now that callbacks are gone. """ if self.closed(): # connection has been closed, so there can be no future events return if self._state is None: self._state = ioloop.IOLoop.ERROR | state self.io_loop.add_handler(self.fileno(), self._handle_events, self._state) elif not self._state & state: self._state = self._state | state self.io_loop.update_handler(self.fileno(), self._state) def _is_connreset(self, exc: BaseException) -> bool: """Return ``True`` if exc is ECONNRESET or equivalent. May be overridden in subclasses. """ return ( isinstance(exc, (socket.error, IOError)) and errno_from_exception(exc) in _ERRNO_CONNRESET ) class IOStream(BaseIOStream): r"""Socket-based `IOStream` implementation. This class supports the read and write methods from `BaseIOStream` plus a `connect` method. The ``socket`` parameter may either be connected or unconnected. For server operations the socket is the result of calling `socket.accept `. For client operations the socket is created with `socket.socket`, and may either be connected before passing it to the `IOStream` or connected with `IOStream.connect`. A very simple (and broken) HTTP client using this class: .. testcode:: import tornado.ioloop import tornado.iostream import socket async def main(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) stream = tornado.iostream.IOStream(s) await stream.connect(("friendfeed.com", 80)) await stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n") header_data = await stream.read_until(b"\r\n\r\n") headers = {} for line in header_data.split(b"\r\n"): parts = line.split(b":") if len(parts) == 2: headers[parts[0].strip()] = parts[1].strip() body_data = await stream.read_bytes(int(headers[b"Content-Length"])) print(body_data) stream.close() if __name__ == '__main__': tornado.ioloop.IOLoop.current().run_sync(main) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) stream = tornado.iostream.IOStream(s) stream.connect(("friendfeed.com", 80), send_request) tornado.ioloop.IOLoop.current().start() .. testoutput:: :hide: """ def __init__(self, socket: socket.socket, *args: Any, **kwargs: Any) -> None: self.socket = socket self.socket.setblocking(False) super().__init__(*args, **kwargs) def fileno(self) -> Union[int, ioloop._Selectable]: return self.socket def close_fd(self) -> None: self.socket.close() self.socket = None # type: ignore def get_fd_error(self) -> Optional[Exception]: errno = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) return socket.error(errno, os.strerror(errno)) def read_from_fd(self, buf: Union[bytearray, memoryview]) -> Optional[int]: try: return self.socket.recv_into(buf, len(buf)) except BlockingIOError: return None finally: del buf def write_to_fd(self, data: memoryview) -> int: try: return self.socket.send(data) # type: ignore finally: # Avoid keeping to data, which can be a memoryview. 
# See https://github.com/tornadoweb/tornado/pull/2008 del data def connect( self: _IOStreamType, address: Any, server_hostname: Optional[str] = None ) -> "Future[_IOStreamType]": """Connects the socket to a remote address without blocking. May only be called if the socket passed to the constructor was not previously connected. The address parameter is in the same format as for `socket.connect ` for the type of socket passed to the IOStream constructor, e.g. an ``(ip, port)`` tuple. Hostnames are accepted here, but will be resolved synchronously and block the IOLoop. If you have a hostname instead of an IP address, the `.TCPClient` class is recommended instead of calling this method directly. `.TCPClient` will do asynchronous DNS resolution and handle both IPv4 and IPv6. If ``callback`` is specified, it will be called with no arguments when the connection is completed; if not this method returns a `.Future` (whose result after a successful connection will be the stream itself). In SSL mode, the ``server_hostname`` parameter will be used for certificate validation (unless disabled in the ``ssl_options``) and SNI (if supported; requires Python 2.7.9+). Note that it is safe to call `IOStream.write ` while the connection is pending, in which case the data will be written as soon as the connection is ready. Calling `IOStream` read methods before the socket is connected works on some platforms but is non-portable. .. versionchanged:: 4.0 If no callback is given, returns a `.Future`. .. versionchanged:: 4.2 SSL certificates are validated by default; pass ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a suitably-configured `ssl.SSLContext` to the `SSLIOStream` constructor to disable. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. """ self._connecting = True future = Future() # type: Future[_IOStreamType] self._connect_future = typing.cast("Future[IOStream]", future) try: self.socket.connect(address) except BlockingIOError: # In non-blocking mode we expect connect() to raise an # exception with EINPROGRESS or EWOULDBLOCK. pass except socket.error as e: # On freebsd, other errors such as ECONNREFUSED may be # returned immediately when attempting to connect to # localhost, so handle them the same way as an error # reported later in _handle_connect. if future is None: gen_log.warning("Connect error on fd %s: %s", self.socket.fileno(), e) self.close(exc_info=e) return future self._add_io_state(self.io_loop.WRITE) return future def start_tls( self, server_side: bool, ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None, server_hostname: Optional[str] = None, ) -> Awaitable["SSLIOStream"]: """Convert this `IOStream` to an `SSLIOStream`. This enables protocols that begin in clear-text mode and switch to SSL after some initial negotiation (such as the ``STARTTLS`` extension to SMTP and IMAP). This method cannot be used if there are outstanding reads or writes on the stream, or if there is any data in the IOStream's buffer (data in the operating system's socket buffer is allowed). This means it must generally be used immediately after reading or writing the last clear-text data. It can also be used immediately after connecting, before any reads or writes. The ``ssl_options`` argument may be either an `ssl.SSLContext` object or a dictionary of keyword arguments for the `ssl.wrap_socket` function. The ``server_hostname`` argument will be used for certificate validation unless disabled in the ``ssl_options``. 
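A rough client-side sketch of a ``STARTTLS``-style upgrade (the protocol commands and hostname below are illustrative, not part of this API)::

    async def upgrade(stream):
        await stream.write(b"STARTTLS\r\n")
        await stream.read_until(b"\r\n")  # wait for the server's go-ahead
        # The plain-text stream must be idle at this point.
        return await stream.start_tls(False, server_hostname="example.com")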
This method returns a `.Future` whose result is the new `SSLIOStream`. After this method has been called, any other operation on the original stream is undefined. If a close callback is defined on this stream, it will be transferred to the new stream. .. versionadded:: 4.0 .. versionchanged:: 4.2 SSL certificates are validated by default; pass ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a suitably-configured `ssl.SSLContext` to disable. """ if ( self._read_future or self._write_futures or self._connect_future or self._closed or self._read_buffer or self._write_buffer ): raise ValueError("IOStream is not idle; cannot convert to SSL") if ssl_options is None: if server_side: ssl_options = _server_ssl_defaults else: ssl_options = _client_ssl_defaults socket = self.socket self.io_loop.remove_handler(socket) self.socket = None # type: ignore socket = ssl_wrap_socket( socket, ssl_options, server_hostname=server_hostname, server_side=server_side, do_handshake_on_connect=False, ) orig_close_callback = self._close_callback self._close_callback = None future = Future() # type: Future[SSLIOStream] ssl_stream = SSLIOStream(socket, ssl_options=ssl_options) ssl_stream.set_close_callback(orig_close_callback) ssl_stream._ssl_connect_future = future ssl_stream.max_buffer_size = self.max_buffer_size ssl_stream.read_chunk_size = self.read_chunk_size return future def _handle_connect(self) -> None: try: err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) except socket.error as e: # Hurd doesn't allow SO_ERROR for loopback sockets because all # errors for such sockets are reported synchronously. if errno_from_exception(e) == errno.ENOPROTOOPT: err = 0 if err != 0: self.error = socket.error(err, os.strerror(err)) # IOLoop implementations may vary: some of them return # an error state before the socket becomes writable, so # in that case a connection failure would be handled by the # error path in _handle_events instead of here. if self._connect_future is None: gen_log.warning( "Connect error on fd %s: %s", self.socket.fileno(), errno.errorcode[err], ) self.close() return if self._connect_future is not None: future = self._connect_future self._connect_future = None future_set_result_unless_cancelled(future, self) self._connecting = False def set_nodelay(self, value: bool) -> None: if self.socket is not None and self.socket.family in ( socket.AF_INET, socket.AF_INET6, ): try: self.socket.setsockopt( socket.IPPROTO_TCP, socket.TCP_NODELAY, 1 if value else 0 ) except socket.error as e: # Sometimes setsockopt will fail if the socket is closed # at the wrong time. This can happen with HTTPServer # resetting the value to ``False`` between requests. if e.errno != errno.EINVAL and not self._is_connreset(e): raise class SSLIOStream(IOStream): """A utility class to write to and read from a non-blocking SSL socket. If the socket passed to the constructor is already connected, it should be wrapped with:: ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs) before constructing the `SSLIOStream`. Unconnected sockets will be wrapped when `IOStream.connect` is finished. 
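A minimal client sketch, assuming a reachable TLS server (the host and port are illustrative; the unconnected socket is wrapped during `IOStream.connect`)::

    import socket
    from tornado.iostream import SSLIOStream

    async def fetch_headers():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        stream = SSLIOStream(s)
        await stream.connect(("example.com", 443), server_hostname="example.com")
        await stream.write(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        return await stream.read_until(b"\r\n\r\n")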
""" socket = None # type: ssl.SSLSocket def __init__(self, *args: Any, **kwargs: Any) -> None: """The ``ssl_options`` keyword argument may either be an `ssl.SSLContext` object or a dictionary of keywords arguments for `ssl.wrap_socket` """ self._ssl_options = kwargs.pop("ssl_options", _client_ssl_defaults) super().__init__(*args, **kwargs) self._ssl_accepting = True self._handshake_reading = False self._handshake_writing = False self._server_hostname = None # type: Optional[str] # If the socket is already connected, attempt to start the handshake. try: self.socket.getpeername() except socket.error: pass else: # Indirectly start the handshake, which will run on the next # IOLoop iteration and then the real IO state will be set in # _handle_events. self._add_io_state(self.io_loop.WRITE) def reading(self) -> bool: return self._handshake_reading or super().reading() def writing(self) -> bool: return self._handshake_writing or super().writing() def _do_ssl_handshake(self) -> None: # Based on code from test_ssl.py in the python stdlib try: self._handshake_reading = False self._handshake_writing = False self.socket.do_handshake() except ssl.SSLError as err: if err.args[0] == ssl.SSL_ERROR_WANT_READ: self._handshake_reading = True return elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: self._handshake_writing = True return elif err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): return self.close(exc_info=err) elif err.args[0] == ssl.SSL_ERROR_SSL: try: peer = self.socket.getpeername() except Exception: peer = "(not connected)" gen_log.warning( "SSL Error on %s %s: %s", self.socket.fileno(), peer, err ) return self.close(exc_info=err) raise except ssl.CertificateError as err: # CertificateError can happen during handshake (hostname # verification) and should be passed to user. Starting # in Python 3.7, this error is a subclass of SSLError # and will be handled by the previous block instead. return self.close(exc_info=err) except socket.error as err: # Some port scans (e.g. nmap in -sT mode) have been known # to cause do_handshake to raise EBADF and ENOTCONN, so make # those errors quiet as well. # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0 # Errno 0 is also possible in some cases (nc -z). # https://github.com/tornadoweb/tornado/issues/2504 if self._is_connreset(err) or err.args[0] in ( 0, errno.EBADF, errno.ENOTCONN, ): return self.close(exc_info=err) raise except AttributeError as err: # On Linux, if the connection was reset before the call to # wrap_socket, do_handshake will fail with an # AttributeError. return self.close(exc_info=err) else: self._ssl_accepting = False if not self._verify_cert(self.socket.getpeercert()): self.close() return self._finish_ssl_connect() def _finish_ssl_connect(self) -> None: if self._ssl_connect_future is not None: future = self._ssl_connect_future self._ssl_connect_future = None future_set_result_unless_cancelled(future, self) def _verify_cert(self, peercert: Any) -> bool: """Returns ``True`` if peercert is valid according to the configured validation mode and hostname. The ssl handshake already tested the certificate for a valid CA signature; the only thing that remains is to check the hostname. 
""" if isinstance(self._ssl_options, dict): verify_mode = self._ssl_options.get("cert_reqs", ssl.CERT_NONE) elif isinstance(self._ssl_options, ssl.SSLContext): verify_mode = self._ssl_options.verify_mode assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL) if verify_mode == ssl.CERT_NONE or self._server_hostname is None: return True cert = self.socket.getpeercert() if cert is None and verify_mode == ssl.CERT_REQUIRED: gen_log.warning("No SSL certificate given") return False try: ssl.match_hostname(peercert, self._server_hostname) except ssl.CertificateError as e: gen_log.warning("Invalid SSL certificate: %s" % e) return False else: return True def _handle_read(self) -> None: if self._ssl_accepting: self._do_ssl_handshake() return super()._handle_read() def _handle_write(self) -> None: if self._ssl_accepting: self._do_ssl_handshake() return super()._handle_write() def connect( self, address: Tuple, server_hostname: Optional[str] = None ) -> "Future[SSLIOStream]": self._server_hostname = server_hostname # Ignore the result of connect(). If it fails, # wait_for_handshake will raise an error too. This is # necessary for the old semantics of the connect callback # (which takes no arguments). In 6.0 this can be refactored to # be a regular coroutine. # TODO: This is trickier than it looks, since if write() # is called with a connect() pending, we want the connect # to resolve before the write. Or do we care about this? # (There's a test for it, but I think in practice users # either wait for the connect before performing a write or # they don't care about the connect Future at all) fut = super().connect(address) fut.add_done_callback(lambda f: f.exception()) return self.wait_for_handshake() def _handle_connect(self) -> None: # Call the superclass method to check for errors. super()._handle_connect() if self.closed(): return # When the connection is complete, wrap the socket for SSL # traffic. Note that we do this by overriding _handle_connect # instead of by passing a callback to super().connect because # user callbacks are enqueued asynchronously on the IOLoop, # but since _handle_events calls _handle_connect immediately # followed by _handle_write we need this to be synchronous. # # The IOLoop will get confused if we swap out self.socket while the # fd is registered, so remove it now and re-register after # wrap_socket(). self.io_loop.remove_handler(self.socket) old_state = self._state assert old_state is not None self._state = None self.socket = ssl_wrap_socket( self.socket, self._ssl_options, server_hostname=self._server_hostname, do_handshake_on_connect=False, ) self._add_io_state(old_state) def wait_for_handshake(self) -> "Future[SSLIOStream]": """Wait for the initial SSL handshake to complete. If a ``callback`` is given, it will be called with no arguments once the handshake is complete; otherwise this method returns a `.Future` which will resolve to the stream itself after the handshake is complete. Once the handshake is complete, information such as the peer's certificate and NPN/ALPN selections may be accessed on ``self.socket``. This method is intended for use on server-side streams or after using `IOStream.start_tls`; it should not be used with `IOStream.connect` (which already waits for the handshake to complete). It may only be called once per stream. .. versionadded:: 4.2 .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead. 
""" if self._ssl_connect_future is not None: raise RuntimeError("Already waiting") future = self._ssl_connect_future = Future() if not self._ssl_accepting: self._finish_ssl_connect() return future def write_to_fd(self, data: memoryview) -> int: try: return self.socket.send(data) # type: ignore except ssl.SSLError as e: if e.args[0] == ssl.SSL_ERROR_WANT_WRITE: # In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if # the socket is not writeable; we need to transform this into # an EWOULDBLOCK socket.error or a zero return value, # either of which will be recognized by the caller of this # method. Prior to Python 3.5, an unwriteable socket would # simply return 0 bytes written. return 0 raise finally: # Avoid keeping to data, which can be a memoryview. # See https://github.com/tornadoweb/tornado/pull/2008 del data def read_from_fd(self, buf: Union[bytearray, memoryview]) -> Optional[int]: try: if self._ssl_accepting: # If the handshake hasn't finished yet, there can't be anything # to read (attempting to read may or may not raise an exception # depending on the SSL version) return None try: return self.socket.recv_into(buf, len(buf)) except ssl.SSLError as e: # SSLError is a subclass of socket.error, so this except # block must come first. if e.args[0] == ssl.SSL_ERROR_WANT_READ: return None else: raise except BlockingIOError: return None finally: del buf def _is_connreset(self, e: BaseException) -> bool: if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF: return True return super()._is_connreset(e) class PipeIOStream(BaseIOStream): """Pipe-based `IOStream` implementation. The constructor takes an integer file descriptor (such as one returned by `os.pipe`) rather than an open file object. Pipes are generally one-way, so a `PipeIOStream` can be used for reading or writing but not both. ``PipeIOStream`` is only available on Unix-based platforms. """ def __init__(self, fd: int, *args: Any, **kwargs: Any) -> None: self.fd = fd self._fio = io.FileIO(self.fd, "r+") os.set_blocking(fd, False) super().__init__(*args, **kwargs) def fileno(self) -> int: return self.fd def close_fd(self) -> None: self._fio.close() def write_to_fd(self, data: memoryview) -> int: try: return os.write(self.fd, data) # type: ignore finally: # Avoid keeping to data, which can be a memoryview. # See https://github.com/tornadoweb/tornado/pull/2008 del data def read_from_fd(self, buf: Union[bytearray, memoryview]) -> Optional[int]: try: return self._fio.readinto(buf) # type: ignore except (IOError, OSError) as e: if errno_from_exception(e) == errno.EBADF: # If the writing half of a pipe is closed, select will # report it as readable but reads will fail with EBADF. self.close(exc_info=e) return None else: raise finally: del buf def doctests() -> Any: import doctest return doctest.DocTestSuite() tornado-6.1.0/tornado/locale.py000066400000000000000000000507541374705040500164630ustar00rootroot00000000000000# Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Translation methods for generating localized strings. To load a locale and generate a translated string:: user_locale = tornado.locale.get("es_LA") print(user_locale.translate("Sign out")) `tornado.locale.get()` returns the closest matching locale, not necessarily the specific locale you requested. You can support pluralization with additional arguments to `~Locale.translate()`, e.g.:: people = [...] message = user_locale.translate( "%(list)s is online", "%(list)s are online", len(people)) print(message % {"list": user_locale.list(people)}) The first string is chosen if ``len(people) == 1``, otherwise the second string is chosen. Applications should call one of `load_translations` (which uses a simple CSV format) or `load_gettext_translations` (which uses the ``.mo`` format supported by `gettext` and related tools). If neither method is called, the `Locale.translate` method will simply return the original string. """ import codecs import csv import datetime import gettext import os import re from tornado import escape from tornado.log import gen_log from tornado._locale_data import LOCALE_NAMES from typing import Iterable, Any, Union, Dict, Optional _default_locale = "en_US" _translations = {} # type: Dict[str, Any] _supported_locales = frozenset([_default_locale]) _use_gettext = False CONTEXT_SEPARATOR = "\x04" def get(*locale_codes: str) -> "Locale": """Returns the closest match for the given locale codes. We iterate over all given locale codes in order. If we have a tight or a loose match for the code (e.g., "en" for "en_US"), we return the locale. Otherwise we move to the next code in the list. By default we return ``en_US`` if no translations are found for any of the specified locales. You can change the default locale with `set_default_locale()`. """ return Locale.get_closest(*locale_codes) def set_default_locale(code: str) -> None: """Sets the default locale. The default locale is assumed to be the language used for all strings in the system. The translations loaded from disk are mappings from the default locale to the destination locale. Consequently, you don't need to create a translation file for the default locale. """ global _default_locale global _supported_locales _default_locale = code _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) def load_translations(directory: str, encoding: Optional[str] = None) -> None: """Loads translations from CSV files in a directory. Translations are strings with optional Python-style named placeholders (e.g., ``My name is %(name)s``) and their associated translations. The directory should have translation files of the form ``LOCALE.csv``, e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, translation, and an optional plural indicator. Plural indicators should be one of "plural" or "singular". A given string can have both singular and plural forms. For example ``%(name)s liked this`` may have a different verb conjugation depending on whether %(name)s is one name or a list of names. There should be two rows in the CSV file for that string, one with plural indicator "singular", and one "plural". For strings with no verbs that would change on translation, simply use "unknown" or the empty string (or don't include the column at all). The file is read using the `csv` module in the default "excel" dialect. In this format there should not be spaces after the commas. 
If no ``encoding`` parameter is given, the encoding will be detected automatically (among UTF-8 and UTF-16) if the file contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM is present. Example translation ``es_LA.csv``:: "I love you","Te amo" "%(name)s liked this","A %(name)s les gustó esto","plural" "%(name)s liked this","A %(name)s le gustó esto","singular" .. versionchanged:: 4.3 Added ``encoding`` parameter. Added support for BOM-based encoding detection, UTF-16, and UTF-8-with-BOM. """ global _translations global _supported_locales _translations = {} for path in os.listdir(directory): if not path.endswith(".csv"): continue locale, extension = path.split(".") if not re.match("[a-z]+(_[A-Z]+)?$", locale): gen_log.error( "Unrecognized locale %r (path: %s)", locale, os.path.join(directory, path), ) continue full_path = os.path.join(directory, path) if encoding is None: # Try to autodetect encoding based on the BOM. with open(full_path, "rb") as bf: data = bf.read(len(codecs.BOM_UTF16_LE)) if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): encoding = "utf-16" else: # utf-8-sig is "utf-8 with optional BOM". It's discouraged # in most cases but is common with CSV files because Excel # cannot read utf-8 files without a BOM. encoding = "utf-8-sig" # python 3: csv.reader requires a file open in text mode. # Specify an encoding to avoid dependence on $LANG environment variable. with open(full_path, encoding=encoding) as f: _translations[locale] = {} for i, row in enumerate(csv.reader(f)): if not row or len(row) < 2: continue row = [escape.to_unicode(c).strip() for c in row] english, translation = row[:2] if len(row) > 2: plural = row[2] or "unknown" else: plural = "unknown" if plural not in ("plural", "singular", "unknown"): gen_log.error( "Unrecognized plural indicator %r in %s line %d", plural, path, i + 1, ) continue _translations[locale].setdefault(plural, {})[english] = translation _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) gen_log.debug("Supported locales: %s", sorted(_supported_locales)) def load_gettext_translations(directory: str, domain: str) -> None: """Loads translations from `gettext`'s locale tree Locale tree is similar to system's ``/usr/share/locale``, like:: {directory}/{lang}/LC_MESSAGES/{domain}.mo Three steps are required to have your app translated: 1. Generate POT translation file:: xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc 2. Merge against existing POT file:: msgmerge old.po mydomain.po > new.po 3. Compile:: msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo """ global _translations global _supported_locales global _use_gettext _translations = {} for lang in os.listdir(directory): if lang.startswith("."): continue # skip .svn, etc if os.path.isfile(os.path.join(directory, lang)): continue try: os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo")) _translations[lang] = gettext.translation( domain, directory, languages=[lang] ) except Exception as e: gen_log.error("Cannot load translation for '%s': %s", lang, str(e)) continue _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) _use_gettext = True gen_log.debug("Supported locales: %s", sorted(_supported_locales)) def get_supported_locales() -> Iterable[str]: """Returns a list of all the supported locale codes.""" return _supported_locales class Locale(object): """Object representing a locale. 
After calling one of `load_translations` or `load_gettext_translations`, call `get` or `get_closest` to get a Locale object. """ _cache = {} # type: Dict[str, Locale] @classmethod def get_closest(cls, *locale_codes: str) -> "Locale": """Returns the closest match for the given locale code.""" for code in locale_codes: if not code: continue code = code.replace("-", "_") parts = code.split("_") if len(parts) > 2: continue elif len(parts) == 2: code = parts[0].lower() + "_" + parts[1].upper() if code in _supported_locales: return cls.get(code) if parts[0].lower() in _supported_locales: return cls.get(parts[0].lower()) return cls.get(_default_locale) @classmethod def get(cls, code: str) -> "Locale": """Returns the Locale for the given locale code. If it is not supported, we raise an exception. """ if code not in cls._cache: assert code in _supported_locales translations = _translations.get(code, None) if translations is None: locale = CSVLocale(code, {}) # type: Locale elif _use_gettext: locale = GettextLocale(code, translations) else: locale = CSVLocale(code, translations) cls._cache[code] = locale return cls._cache[code] def __init__(self, code: str) -> None: self.code = code self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown") self.rtl = False for prefix in ["fa", "ar", "he"]: if self.code.startswith(prefix): self.rtl = True break # Initialize strings for date formatting _ = self.translate self._months = [ _("January"), _("February"), _("March"), _("April"), _("May"), _("June"), _("July"), _("August"), _("September"), _("October"), _("November"), _("December"), ] self._weekdays = [ _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), _("Friday"), _("Saturday"), _("Sunday"), ] def translate( self, message: str, plural_message: Optional[str] = None, count: Optional[int] = None, ) -> str: """Returns the translation for the given message for this locale. If ``plural_message`` is given, you must also provide ``count``. We return ``plural_message`` when ``count != 1``, and we return the singular form for the given message when ``count == 1``. """ raise NotImplementedError() def pgettext( self, context: str, message: str, plural_message: Optional[str] = None, count: Optional[int] = None, ) -> str: raise NotImplementedError() def format_date( self, date: Union[int, float, datetime.datetime], gmt_offset: int = 0, relative: bool = True, shorter: bool = False, full_format: bool = False, ) -> str: """Formats the given date (which should be GMT). By default, we return a relative time (e.g., "2 minutes ago"). You can return an absolute date string with ``relative=False``. You can force a full format date ("July 10, 1980") with ``full_format=True``. This method is primarily intended for dates in the past. For dates in the future, we fall back to full format. """ if isinstance(date, (int, float)): date = datetime.datetime.utcfromtimestamp(date) now = datetime.datetime.utcnow() if date > now: if relative and (date - now).seconds < 60: # Due to clock skew, some things are slightly # in the future. Round timestamps in the immediate # future down to now in relative mode. date = now else: # Otherwise, future dates always use the full format.
full_format = True local_date = date - datetime.timedelta(minutes=gmt_offset) local_now = now - datetime.timedelta(minutes=gmt_offset) local_yesterday = local_now - datetime.timedelta(hours=24) difference = now - date seconds = difference.seconds days = difference.days _ = self.translate format = None if not full_format: if relative and days == 0: if seconds < 50: return _("1 second ago", "%(seconds)d seconds ago", seconds) % { "seconds": seconds } if seconds < 50 * 60: minutes = round(seconds / 60.0) return _("1 minute ago", "%(minutes)d minutes ago", minutes) % { "minutes": minutes } hours = round(seconds / (60.0 * 60)) return _("1 hour ago", "%(hours)d hours ago", hours) % {"hours": hours} if days == 0: format = _("%(time)s") elif days == 1 and local_date.day == local_yesterday.day and relative: format = _("yesterday") if shorter else _("yesterday at %(time)s") elif days < 5: format = _("%(weekday)s") if shorter else _("%(weekday)s at %(time)s") elif days < 334: # 11mo, since confusing for same month last year format = ( _("%(month_name)s %(day)s") if shorter else _("%(month_name)s %(day)s at %(time)s") ) if format is None: format = ( _("%(month_name)s %(day)s, %(year)s") if shorter else _("%(month_name)s %(day)s, %(year)s at %(time)s") ) tfhour_clock = self.code not in ("en", "en_US", "zh_CN") if tfhour_clock: str_time = "%d:%02d" % (local_date.hour, local_date.minute) elif self.code == "zh_CN": str_time = "%s%d:%02d" % ( (u"\u4e0a\u5348", u"\u4e0b\u5348")[local_date.hour >= 12], local_date.hour % 12 or 12, local_date.minute, ) else: str_time = "%d:%02d %s" % ( local_date.hour % 12 or 12, local_date.minute, ("am", "pm")[local_date.hour >= 12], ) return format % { "month_name": self._months[local_date.month - 1], "weekday": self._weekdays[local_date.weekday()], "day": str(local_date.day), "year": str(local_date.year), "time": str_time, } def format_day( self, date: datetime.datetime, gmt_offset: int = 0, dow: bool = True ) -> str: """Formats the given date as a day of week. Example: "Monday, January 22". You can remove the day of week with ``dow=False``. """ local_date = date - datetime.timedelta(minutes=gmt_offset) _ = self.translate if dow: return _("%(weekday)s, %(month_name)s %(day)s") % { "month_name": self._months[local_date.month - 1], "weekday": self._weekdays[local_date.weekday()], "day": str(local_date.day), } else: return _("%(month_name)s %(day)s") % { "month_name": self._months[local_date.month - 1], "day": str(local_date.day), } def list(self, parts: Any) -> str: """Returns a comma-separated list for the given list of parts. The format is, e.g., "A, B and C", "A and B" or just "A" for lists of size 1.
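For example, in the default ``en_US`` locale (values illustrative)::

    locale.list(["apples", "pears", "plums"])  # "apples, pears and plums"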
""" _ = self.translate if len(parts) == 0: return "" if len(parts) == 1: return parts[0] comma = u" \u0648 " if self.code.startswith("fa") else u", " return _("%(commas)s and %(last)s") % { "commas": comma.join(parts[:-1]), "last": parts[len(parts) - 1], } def friendly_number(self, value: int) -> str: """Returns a comma-separated number for the given integer.""" if self.code not in ("en", "en_US"): return str(value) s = str(value) parts = [] while s: parts.append(s[-3:]) s = s[:-3] return ",".join(reversed(parts)) class CSVLocale(Locale): """Locale implementation using tornado's CSV translation format.""" def __init__(self, code: str, translations: Dict[str, Dict[str, str]]) -> None: self.translations = translations super().__init__(code) def translate( self, message: str, plural_message: Optional[str] = None, count: Optional[int] = None, ) -> str: if plural_message is not None: assert count is not None if count != 1: message = plural_message message_dict = self.translations.get("plural", {}) else: message_dict = self.translations.get("singular", {}) else: message_dict = self.translations.get("unknown", {}) return message_dict.get(message, message) def pgettext( self, context: str, message: str, plural_message: Optional[str] = None, count: Optional[int] = None, ) -> str: if self.translations: gen_log.warning("pgettext is not supported by CSVLocale") return self.translate(message, plural_message, count) class GettextLocale(Locale): """Locale implementation using the `gettext` module.""" def __init__(self, code: str, translations: gettext.NullTranslations) -> None: self.ngettext = translations.ngettext self.gettext = translations.gettext # self.gettext must exist before __init__ is called, since it # calls into self.translate super().__init__(code) def translate( self, message: str, plural_message: Optional[str] = None, count: Optional[int] = None, ) -> str: if plural_message is not None: assert count is not None return self.ngettext(message, plural_message, count) else: return self.gettext(message) def pgettext( self, context: str, message: str, plural_message: Optional[str] = None, count: Optional[int] = None, ) -> str: """Allows to set context for translation, accepts plural forms. Usage example:: pgettext("law", "right") pgettext("good", "right") Plural message example:: pgettext("organization", "club", "clubs", len(clubs)) pgettext("stick", "club", "clubs", len(clubs)) To generate POT file with context, add following options to step 1 of `load_gettext_translations` sequence:: xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 .. versionadded:: 4.2 """ if plural_message is not None: assert count is not None msgs_with_ctxt = ( "%s%s%s" % (context, CONTEXT_SEPARATOR, message), "%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message), count, ) result = self.ngettext(*msgs_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = self.ngettext(message, plural_message, count) return result else: msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message) result = self.gettext(msg_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = message return result tornado-6.1.0/tornado/locks.py000066400000000000000000000420061374705040500163260ustar00rootroot00000000000000# Copyright 2015 The Tornado Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import datetime import types from tornado import gen, ioloop from tornado.concurrent import Future, future_set_result_unless_cancelled from typing import Union, Optional, Type, Any, Awaitable import typing if typing.TYPE_CHECKING: from typing import Deque, Set # noqa: F401 __all__ = ["Condition", "Event", "Semaphore", "BoundedSemaphore", "Lock"] class _TimeoutGarbageCollector(object): """Base class for objects that periodically clean up timed-out waiters. Avoids memory leak in a common pattern like: while True: yield condition.wait(short_timeout) print('looping....') """ def __init__(self) -> None: self._waiters = collections.deque() # type: Deque[Future] self._timeouts = 0 def _garbage_collect(self) -> None: # Occasionally clear timed-out waiters. self._timeouts += 1 if self._timeouts > 100: self._timeouts = 0 self._waiters = collections.deque(w for w in self._waiters if not w.done()) class Condition(_TimeoutGarbageCollector): """A condition allows one or more coroutines to wait until notified. Like a standard `threading.Condition`, but does not need an underlying lock that is acquired and released. With a `Condition`, coroutines can wait to be notified by other coroutines: .. testcode:: from tornado import gen from tornado.ioloop import IOLoop from tornado.locks import Condition condition = Condition() async def waiter(): print("I'll wait right here") await condition.wait() print("I'm done waiting") async def notifier(): print("About to notify") condition.notify() print("Done notifying") async def runner(): # Wait for waiter() and notifier() in parallel await gen.multi([waiter(), notifier()]) IOLoop.current().run_sync(runner) .. testoutput:: I'll wait right here About to notify Done notifying I'm done waiting `wait` takes an optional ``timeout`` argument, which is either an absolute timestamp:: io_loop = IOLoop.current() # Wait up to 1 second for a notification. await condition.wait(timeout=io_loop.time() + 1) ...or a `datetime.timedelta` for a timeout relative to the current time:: # Wait up to 1 second. await condition.wait(timeout=datetime.timedelta(seconds=1)) The method returns False if there's no notification before the deadline. .. versionchanged:: 5.0 Previously, waiters could be notified synchronously from within `notify`. Now, the notification will always be received on the next iteration of the `.IOLoop`. """ def __init__(self) -> None: super().__init__() self.io_loop = ioloop.IOLoop.current() def __repr__(self) -> str: result = "<%s" % (self.__class__.__name__,) if self._waiters: result += " waiters[%s]" % len(self._waiters) return result + ">" def wait( self, timeout: Optional[Union[float, datetime.timedelta]] = None ) -> Awaitable[bool]: """Wait for `.notify`. Returns a `.Future` that resolves ``True`` if the condition is notified, or ``False`` after a timeout. 
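A sketch of waiting with a relative timeout (assumes ``condition`` is a `Condition` shared with a notifier)::

    import datetime

    async def waiter(condition):
        notified = await condition.wait(datetime.timedelta(seconds=1))
        if not notified:
            print("timed out waiting for notify()")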
""" waiter = Future() # type: Future[bool] self._waiters.append(waiter) if timeout: def on_timeout() -> None: if not waiter.done(): future_set_result_unless_cancelled(waiter, False) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) waiter.add_done_callback(lambda _: io_loop.remove_timeout(timeout_handle)) return waiter def notify(self, n: int = 1) -> None: """Wake ``n`` waiters.""" waiters = [] # Waiters we plan to run right now. while n and self._waiters: waiter = self._waiters.popleft() if not waiter.done(): # Might have timed out. n -= 1 waiters.append(waiter) for waiter in waiters: future_set_result_unless_cancelled(waiter, True) def notify_all(self) -> None: """Wake all waiters.""" self.notify(len(self._waiters)) class Event(object): """An event blocks coroutines until its internal flag is set to True. Similar to `threading.Event`. A coroutine can wait for an event to be set. Once it is set, calls to ``yield event.wait()`` will not block unless the event has been cleared: .. testcode:: from tornado import gen from tornado.ioloop import IOLoop from tornado.locks import Event event = Event() async def waiter(): print("Waiting for event") await event.wait() print("Not waiting this time") await event.wait() print("Done") async def setter(): print("About to set the event") event.set() async def runner(): await gen.multi([waiter(), setter()]) IOLoop.current().run_sync(runner) .. testoutput:: Waiting for event About to set the event Not waiting this time Done """ def __init__(self) -> None: self._value = False self._waiters = set() # type: Set[Future[None]] def __repr__(self) -> str: return "<%s %s>" % ( self.__class__.__name__, "set" if self.is_set() else "clear", ) def is_set(self) -> bool: """Return ``True`` if the internal flag is true.""" return self._value def set(self) -> None: """Set the internal flag to ``True``. All waiters are awakened. Calling `.wait` once the flag is set will not block. """ if not self._value: self._value = True for fut in self._waiters: if not fut.done(): fut.set_result(None) def clear(self) -> None: """Reset the internal flag to ``False``. Calls to `.wait` will block until `.set` is called. """ self._value = False def wait( self, timeout: Optional[Union[float, datetime.timedelta]] = None ) -> Awaitable[None]: """Block until the internal flag is true. Returns an awaitable, which raises `tornado.util.TimeoutError` after a timeout. """ fut = Future() # type: Future[None] if self._value: fut.set_result(None) return fut self._waiters.add(fut) fut.add_done_callback(lambda fut: self._waiters.remove(fut)) if timeout is None: return fut else: timeout_fut = gen.with_timeout(timeout, fut) # This is a slightly clumsy workaround for the fact that # gen.with_timeout doesn't cancel its futures. Cancelling # fut will remove it from the waiters list. timeout_fut.add_done_callback( lambda tf: fut.cancel() if not fut.done() else None ) return timeout_fut class _ReleasingContextManager(object): """Releases a Lock or Semaphore at the end of a "with" statement. with (yield semaphore.acquire()): pass # Now semaphore.release() has been called. """ def __init__(self, obj: Any) -> None: self._obj = obj def __enter__(self) -> None: pass def __exit__( self, exc_type: "Optional[Type[BaseException]]", exc_val: Optional[BaseException], exc_tb: Optional[types.TracebackType], ) -> None: self._obj.release() class Semaphore(_TimeoutGarbageCollector): """A lock that can be acquired a fixed number of times before blocking. 
A Semaphore manages a counter representing the number of `.release` calls minus the number of `.acquire` calls, plus an initial value. The `.acquire` method blocks if necessary until it can return without making the counter negative. Semaphores limit access to a shared resource. To allow access for two workers at a time: .. testsetup:: semaphore from collections import deque from tornado import gen from tornado.ioloop import IOLoop from tornado.concurrent import Future # Ensure reliable doctest output: resolve Futures one at a time. futures_q = deque([Future() for _ in range(3)]) async def simulator(futures): for f in futures: # simulate the asynchronous passage of time await gen.sleep(0) await gen.sleep(0) f.set_result(None) IOLoop.current().add_callback(simulator, list(futures_q)) def use_some_resource(): return futures_q.popleft() .. testcode:: semaphore from tornado import gen from tornado.ioloop import IOLoop from tornado.locks import Semaphore sem = Semaphore(2) async def worker(worker_id): await sem.acquire() try: print("Worker %d is working" % worker_id) await use_some_resource() finally: print("Worker %d is done" % worker_id) sem.release() async def runner(): # Join all workers. await gen.multi([worker(i) for i in range(3)]) IOLoop.current().run_sync(runner) .. testoutput:: semaphore Worker 0 is working Worker 1 is working Worker 0 is done Worker 2 is working Worker 1 is done Worker 2 is done Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until the semaphore has been released once, by worker 0. The semaphore can be used as an async context manager:: async def worker(worker_id): async with sem: print("Worker %d is working" % worker_id) await use_some_resource() # Now the semaphore has been released. print("Worker %d is done" % worker_id) For compatibility with older versions of Python, `.acquire` is a context manager, so ``worker`` could also be written as:: @gen.coroutine def worker(worker_id): with (yield sem.acquire()): print("Worker %d is working" % worker_id) yield use_some_resource() # Now the semaphore has been released. print("Worker %d is done" % worker_id) .. versionchanged:: 4.3 Added ``async with`` support in Python 3.5. """ def __init__(self, value: int = 1) -> None: super().__init__() if value < 0: raise ValueError("semaphore initial value must be >= 0") self._value = value def __repr__(self) -> str: res = super().__repr__() extra = ( "locked" if self._value == 0 else "unlocked,value:{0}".format(self._value) ) if self._waiters: extra = "{0},waiters:{1}".format(extra, len(self._waiters)) return "<{0} [{1}]>".format(res[1:-1], extra) def release(self) -> None: """Increment the counter and wake one waiter.""" self._value += 1 while self._waiters: waiter = self._waiters.popleft() if not waiter.done(): self._value -= 1 # If the waiter is a coroutine paused at # # with (yield semaphore.acquire()): # # then the context manager's __exit__ calls release() at the end # of the "with" block. waiter.set_result(_ReleasingContextManager(self)) break def acquire( self, timeout: Optional[Union[float, datetime.timedelta]] = None ) -> Awaitable[_ReleasingContextManager]: """Decrement the counter. Returns an awaitable. Block if the counter is zero and wait for a `.release`. The awaitable raises `.TimeoutError` after the deadline. 
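A sketch of acquiring with a timeout (assumes ``sem`` is a `Semaphore`)::

    import datetime
    from tornado import gen

    async def guarded(sem):
        try:
            await sem.acquire(timeout=datetime.timedelta(seconds=1))
        except gen.TimeoutError:
            return  # could not acquire in time
        try:
            pass  # use the shared resource here
        finally:
            sem.release()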
""" waiter = Future() # type: Future[_ReleasingContextManager] if self._value > 0: self._value -= 1 waiter.set_result(_ReleasingContextManager(self)) else: self._waiters.append(waiter) if timeout: def on_timeout() -> None: if not waiter.done(): waiter.set_exception(gen.TimeoutError()) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) waiter.add_done_callback( lambda _: io_loop.remove_timeout(timeout_handle) ) return waiter def __enter__(self) -> None: raise RuntimeError("Use 'async with' instead of 'with' for Semaphore") def __exit__( self, typ: "Optional[Type[BaseException]]", value: Optional[BaseException], traceback: Optional[types.TracebackType], ) -> None: self.__enter__() async def __aenter__(self) -> None: await self.acquire() async def __aexit__( self, typ: "Optional[Type[BaseException]]", value: Optional[BaseException], tb: Optional[types.TracebackType], ) -> None: self.release() class BoundedSemaphore(Semaphore): """A semaphore that prevents release() being called too many times. If `.release` would increment the semaphore's value past the initial value, it raises `ValueError`. Semaphores are mostly used to guard resources with limited capacity, so a semaphore released too many times is a sign of a bug. """ def __init__(self, value: int = 1) -> None: super().__init__(value=value) self._initial_value = value def release(self) -> None: """Increment the counter and wake one waiter.""" if self._value >= self._initial_value: raise ValueError("Semaphore released too many times") super().release() class Lock(object): """A lock for coroutines. A Lock begins unlocked, and `acquire` locks it immediately. While it is locked, a coroutine that yields `acquire` waits until another coroutine calls `release`. Releasing an unlocked lock raises `RuntimeError`. A Lock can be used as an async context manager with the ``async with`` statement: >>> from tornado import locks >>> lock = locks.Lock() >>> >>> async def f(): ... async with lock: ... # Do something holding the lock. ... pass ... ... # Now the lock is released. For compatibility with older versions of Python, the `.acquire` method asynchronously returns a regular context manager: >>> async def f2(): ... with (yield lock.acquire()): ... # Do something holding the lock. ... pass ... ... # Now the lock is released. .. versionchanged:: 4.3 Added ``async with`` support in Python 3.5. """ def __init__(self) -> None: self._block = BoundedSemaphore(value=1) def __repr__(self) -> str: return "<%s _block=%s>" % (self.__class__.__name__, self._block) def acquire( self, timeout: Optional[Union[float, datetime.timedelta]] = None ) -> Awaitable[_ReleasingContextManager]: """Attempt to lock. Returns an awaitable. Returns an awaitable, which raises `tornado.util.TimeoutError` after a timeout. """ return self._block.acquire(timeout) def release(self) -> None: """Unlock. The first coroutine in line waiting for `acquire` gets the lock. If not locked, raise a `RuntimeError`. 
""" try: self._block.release() except ValueError: raise RuntimeError("release unlocked lock") def __enter__(self) -> None: raise RuntimeError("Use `async with` instead of `with` for Lock") def __exit__( self, typ: "Optional[Type[BaseException]]", value: Optional[BaseException], tb: Optional[types.TracebackType], ) -> None: self.__enter__() async def __aenter__(self) -> None: await self.acquire() async def __aexit__( self, typ: "Optional[Type[BaseException]]", value: Optional[BaseException], tb: Optional[types.TracebackType], ) -> None: self.release() tornado-6.1.0/tornado/log.py000066400000000000000000000301761374705040500160010ustar00rootroot00000000000000# # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Logging support for Tornado. Tornado uses three logger streams: * ``tornado.access``: Per-request logging for Tornado's HTTP servers (and potentially other servers in the future) * ``tornado.application``: Logging of errors from application code (i.e. uncaught exceptions from callbacks) * ``tornado.general``: General-purpose logging, including any errors or warnings from Tornado itself. These streams may be configured independently using the standard library's `logging` module. For example, you may wish to send ``tornado.access`` logs to a separate file for analysis. """ import logging import logging.handlers import sys from tornado.escape import _unicode from tornado.util import unicode_type, basestring_type try: import colorama # type: ignore except ImportError: colorama = None try: import curses except ImportError: curses = None # type: ignore from typing import Dict, Any, cast, Optional # Logger objects for internal tornado use access_log = logging.getLogger("tornado.access") app_log = logging.getLogger("tornado.application") gen_log = logging.getLogger("tornado.general") def _stderr_supports_color() -> bool: try: if hasattr(sys.stderr, "isatty") and sys.stderr.isatty(): if curses: curses.setupterm() if curses.tigetnum("colors") > 0: return True elif colorama: if sys.stderr is getattr( colorama.initialise, "wrapped_stderr", object() ): return True except Exception: # Very broad exception handling because it's always better to # fall back to non-colored logs than to break at startup. pass return False def _safe_unicode(s: Any) -> str: try: return _unicode(s) except UnicodeDecodeError: return repr(s) class LogFormatter(logging.Formatter): """Log formatter used in Tornado. Key features of this formatter are: * Color support when logging to a terminal that supports it. * Timestamps on every log line. * Robust against str/bytes encoding problems. This formatter is enabled automatically by `tornado.options.parse_command_line` or `tornado.options.parse_config_file` (unless ``--logging=none`` is used). Color support on Windows versions that do not support ANSI color codes is enabled by use of the colorama__ library. Applications that wish to use this must first initialize colorama with a call to ``colorama.init``. See the colorama documentation for details. 
__ https://pypi.python.org/pypi/colorama .. versionchanged:: 4.5 Added support for ``colorama``. Changed the constructor signature to be compatible with `logging.config.dictConfig`. """ DEFAULT_FORMAT = "%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s" # noqa: E501 DEFAULT_DATE_FORMAT = "%y%m%d %H:%M:%S" DEFAULT_COLORS = { logging.DEBUG: 4, # Blue logging.INFO: 2, # Green logging.WARNING: 3, # Yellow logging.ERROR: 1, # Red logging.CRITICAL: 5, # Magenta } def __init__( self, fmt: str = DEFAULT_FORMAT, datefmt: str = DEFAULT_DATE_FORMAT, style: str = "%", color: bool = True, colors: Dict[int, int] = DEFAULT_COLORS, ) -> None: r""" :arg bool color: Enables color support. :arg str fmt: Log message format. It will be applied to the attributes dict of log records. The text between ``%(color)s`` and ``%(end_color)s`` will be colored depending on the level if color support is on. :arg dict colors: color mappings from logging level to terminal color code :arg str datefmt: Datetime format. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. .. versionchanged:: 3.2 Added ``fmt`` and ``datefmt`` arguments. """ logging.Formatter.__init__(self, datefmt=datefmt) self._fmt = fmt self._colors = {} # type: Dict[int, str] if color and _stderr_supports_color(): if curses is not None: fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or b"" for levelno, code in colors.items(): # Convert the terminal control characters from # bytes to unicode strings for easier use with the # logging module. self._colors[levelno] = unicode_type( curses.tparm(fg_color, code), "ascii" ) self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii") else: # If curses is not present (currently we'll only get here for # colorama on windows), assume hard-coded ANSI color codes. for levelno, code in colors.items(): self._colors[levelno] = "\033[2;3%dm" % code self._normal = "\033[0m" else: self._normal = "" def format(self, record: Any) -> str: try: message = record.getMessage() assert isinstance(message, basestring_type) # guaranteed by logging # Encoding notes: The logging module prefers to work with character # strings, but only enforces that log messages are instances of # basestring. In python 2, non-ascii bytestrings will make # their way through the logging framework until they blow up with # an unhelpful decoding error (with this formatter it happens # when we attach the prefix, but there are other opportunities for # exceptions further along in the framework). # # If a byte string makes it this far, convert it to unicode to # ensure it will make it out to the logs. Use repr() as a fallback # to ensure that all byte strings can be converted successfully, # but don't do it by default so we don't add extra quotes to ascii # bytestrings. This is a bit of a hacky place to do this, but # it's worth it since the encoding errors that would otherwise # result are so useless (and tornado is fond of using utf8-encoded # byte strings wherever possible). 
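            # Illustrative examples (not from the original comment): valid
            # UTF-8 decodes, e.g. _safe_unicode(b"caf\xc3\xa9") -> "café",
            # while invalid bytes fall back to repr, e.g.
            # _safe_unicode(b"\xff") -> "b'\\xff'".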
record.message = _safe_unicode(message) except Exception as e: record.message = "Bad message (%r): %r" % (e, record.__dict__) record.asctime = self.formatTime(record, cast(str, self.datefmt)) if record.levelno in self._colors: record.color = self._colors[record.levelno] record.end_color = self._normal else: record.color = record.end_color = "" formatted = self._fmt % record.__dict__ if record.exc_info: if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: # exc_text contains multiple lines. We need to _safe_unicode # each line separately so that non-utf8 bytes don't cause # all the newlines to turn into '\n'. lines = [formatted.rstrip()] lines.extend(_safe_unicode(ln) for ln in record.exc_text.split("\n")) formatted = "\n".join(lines) return formatted.replace("\n", "\n ") def enable_pretty_logging( options: Any = None, logger: Optional[logging.Logger] = None ) -> None: """Turns on formatted logging output as configured. This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`. """ if options is None: import tornado.options options = tornado.options.options if options.logging is None or options.logging.lower() == "none": return if logger is None: logger = logging.getLogger() logger.setLevel(getattr(logging, options.logging.upper())) if options.log_file_prefix: rotate_mode = options.log_rotate_mode if rotate_mode == "size": channel = logging.handlers.RotatingFileHandler( filename=options.log_file_prefix, maxBytes=options.log_file_max_size, backupCount=options.log_file_num_backups, encoding="utf-8", ) # type: logging.Handler elif rotate_mode == "time": channel = logging.handlers.TimedRotatingFileHandler( filename=options.log_file_prefix, when=options.log_rotate_when, interval=options.log_rotate_interval, backupCount=options.log_file_num_backups, encoding="utf-8", ) else: error_message = ( "The value of log_rotate_mode option should be " + '"size" or "time", not "%s".' % rotate_mode ) raise ValueError(error_message) channel.setFormatter(LogFormatter(color=False)) logger.addHandler(channel) if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers): # Set up color if we are in a tty and curses is installed channel = logging.StreamHandler() channel.setFormatter(LogFormatter()) logger.addHandler(channel) def define_logging_options(options: Any = None) -> None: """Add logging-related flags to ``options``. These options are present automatically on the default options instance; this method is only necessary if you have created your own `.OptionParser`. .. versionadded:: 4.2 This function existed in prior versions but was broken and undocumented until 4.2. """ if options is None: # late import to prevent cycle import tornado.options options = tornado.options.options options.define( "logging", default="info", help=( "Set the Python log level. If 'none', tornado won't touch the " "logging configuration." ), metavar="debug|info|warning|error|none", ) options.define( "log_to_stderr", type=bool, default=None, help=( "Send log output to stderr (colorized if possible). " "By default use stderr if --log_file_prefix is not set and " "no other logging is configured." ), ) options.define( "log_file_prefix", type=str, default=None, metavar="PATH", help=( "Path prefix for log files. " "Note that if you are running multiple tornado processes, " "log_file_prefix must be different for each of them (e.g. 
" "include the port number)" ), ) options.define( "log_file_max_size", type=int, default=100 * 1000 * 1000, help="max size of log files before rollover", ) options.define( "log_file_num_backups", type=int, default=10, help="number of log files to keep" ) options.define( "log_rotate_when", type=str, default="midnight", help=( "specify the type of TimedRotatingFileHandler interval " "other options:('S', 'M', 'H', 'D', 'W0'-'W6')" ), ) options.define( "log_rotate_interval", type=int, default=1, help="The interval value of timed rotating", ) options.define( "log_rotate_mode", type=str, default="size", help="The mode of rotating files(time or size)", ) options.add_parse_callback(lambda: enable_pretty_logging(options)) tornado-6.1.0/tornado/netutil.py000066400000000000000000000546001374705040500167020ustar00rootroot00000000000000# # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Miscellaneous network utility code.""" import concurrent.futures import errno import os import sys import socket import ssl import stat from tornado.concurrent import dummy_executor, run_on_executor from tornado.ioloop import IOLoop from tornado.util import Configurable, errno_from_exception from typing import List, Callable, Any, Type, Dict, Union, Tuple, Awaitable, Optional # Note that the naming of ssl.Purpose is confusing; the purpose # of a context is to authentiate the opposite side of the connection. _client_ssl_defaults = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) _server_ssl_defaults = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) if hasattr(ssl, "OP_NO_COMPRESSION"): # See netutil.ssl_options_to_context _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION # ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode, # getaddrinfo attempts to import encodings.idna. If this is done at # module-import time, the import lock is already held by the main thread, # leading to deadlock. Avoid it by caching the idna encoder on the main # thread now. u"foo".encode("idna") # For undiagnosed reasons, 'latin1' codec may also need to be preloaded. u"foo".encode("latin1") # Default backlog used when calling sock.listen() _DEFAULT_BACKLOG = 128 def bind_sockets( port: int, address: Optional[str] = None, family: socket.AddressFamily = socket.AF_UNSPEC, backlog: int = _DEFAULT_BACKLOG, flags: Optional[int] = None, reuse_port: bool = False, ) -> List[socket.socket]: """Creates listening sockets bound to the given port and address. Returns a list of socket objects (multiple sockets are returned if the given address maps to multiple IP addresses, which is most common for mixed IPv4 and IPv6 use). Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. 
Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen() `. ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``. ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket in the list. If your platform doesn't support this option ValueError will be raised. """ if reuse_port and not hasattr(socket, "SO_REUSEPORT"): raise ValueError("the platform doesn't support SO_REUSEPORT") sockets = [] if address == "": address = None if not socket.has_ipv6 and family == socket.AF_UNSPEC: # Python can be compiled with --disable-ipv6, which causes # operations on AF_INET6 sockets to fail, but does not # automatically exclude those results from getaddrinfo # results. # http://bugs.python.org/issue16208 family = socket.AF_INET if flags is None: flags = socket.AI_PASSIVE bound_port = None unique_addresses = set() # type: set for res in sorted( socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags), key=lambda x: x[0], ): if res in unique_addresses: continue unique_addresses.add(res) af, socktype, proto, canonname, sockaddr = res if ( sys.platform == "darwin" and address == "localhost" and af == socket.AF_INET6 and sockaddr[3] != 0 ): # Mac OS X includes a link-local address fe80::1%lo0 in the # getaddrinfo results for 'localhost'. However, the firewall # doesn't understand that this is a local address and will # prompt for access (often repeatedly, due to an apparent # bug in its ability to remember granting access to an # application). Skip these addresses. continue try: sock = socket.socket(af, socktype, proto) except socket.error as e: if errno_from_exception(e) == errno.EAFNOSUPPORT: continue raise if os.name != "nt": try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except socket.error as e: if errno_from_exception(e) != errno.ENOPROTOOPT: # Hurd doesn't support SO_REUSEADDR. raise if reuse_port: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) if af == socket.AF_INET6: # On linux, ipv6 sockets accept ipv4 too by default, # but this makes it impossible to bind to both # 0.0.0.0 in ipv4 and :: in ipv6. On other systems, # separate sockets *must* be used to listen for both ipv4 # and ipv6. For consistency, always disable ipv4 on our # ipv6 sockets and use a separate ipv4 socket when needed. # # Python 2.x on windows doesn't have IPPROTO_IPV6. if hasattr(socket, "IPPROTO_IPV6"): sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) # automatic port allocation with port=None # should bind on the same port on IPv4 and IPv6 host, requested_port = sockaddr[:2] if requested_port == 0 and bound_port is not None: sockaddr = tuple([host, bound_port] + list(sockaddr[2:])) sock.setblocking(False) try: sock.bind(sockaddr) except OSError as e: if ( errno_from_exception(e) == errno.EADDRNOTAVAIL and address == "localhost" and sockaddr[0] == "::1" ): # On some systems (most notably docker with default # configurations), ipv6 is partially disabled: # socket.has_ipv6 is true, we can create AF_INET6 # sockets, and getaddrinfo("localhost", ..., # AF_PASSIVE) resolves to ::1, but we get an error # when binding. # # Swallow the error, but only for this specific case. # If EADDRNOTAVAIL occurs in other situations, it # might be a real problem like a typo in a # configuration. 
sock.close() continue else: raise bound_port = sock.getsockname()[1] sock.listen(backlog) sockets.append(sock) return sockets if hasattr(socket, "AF_UNIX"): def bind_unix_socket( file: str, mode: int = 0o600, backlog: int = _DEFAULT_BACKLOG ) -> socket.socket: """Creates a listening unix socket. If a socket with the given name already exists, it will be deleted. If any other file with that name exists, an exception will be raised. Returns a socket object (not a list of socket objects like `bind_sockets`). """ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except socket.error as e: if errno_from_exception(e) != errno.ENOPROTOOPT: # Hurd doesn't support SO_REUSEADDR raise sock.setblocking(False) try: st = os.stat(file) except FileNotFoundError: pass else: if stat.S_ISSOCK(st.st_mode): os.remove(file) else: raise ValueError("File %s exists and is not a socket" % file) sock.bind(file) os.chmod(file, mode) sock.listen(backlog) return sock def add_accept_handler( sock: socket.socket, callback: Callable[[socket.socket, Any], None] ) -> Callable[[], None]: """Adds an `.IOLoop` event handler to accept new connections on ``sock``. When a connection is accepted, ``callback(connection, address)`` will be run (``connection`` is a socket object, and ``address`` is the address of the other end of the connection). Note that this signature is different from the ``callback(fd, events)`` signature used for `.IOLoop` handlers. A callable is returned which, when called, will remove the `.IOLoop` event handler and stop processing further incoming connections. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. versionchanged:: 5.0 A callable is returned (``None`` was returned before). """ io_loop = IOLoop.current() removed = [False] def accept_handler(fd: socket.socket, events: int) -> None: # More connections may come in while we're handling callbacks; # to prevent starvation of other tasks we must limit the number # of connections we accept at a time. Ideally we would accept # up to the number of connections that were waiting when we # entered this method, but this information is not available # (and rearranging this method to call accept() as many times # as possible before running any callbacks would have adverse # effects on load balancing in multiprocess configurations). # Instead, we use the (default) listen backlog as a rough # heuristic for the number of connections we can reasonably # accept at once. for i in range(_DEFAULT_BACKLOG): if removed[0]: # The socket was probably closed return try: connection, address = sock.accept() except BlockingIOError: # EWOULDBLOCK indicates we have accepted every # connection that is available. return except ConnectionAbortedError: # ECONNABORTED indicates that there was a connection # but it was closed while still in the accept queue. # (observed on FreeBSD). continue callback(connection, address) def remove_handler() -> None: io_loop.remove_handler(sock) removed[0] = True io_loop.add_handler(sock, accept_handler, IOLoop.READ) return remove_handler def is_valid_ip(ip: str) -> bool: """Returns ``True`` if the given string is a well-formed IP address. Supports IPv4 and IPv6. """ if not ip or "\x00" in ip: # getaddrinfo resolves empty strings to localhost, and truncates # on zero bytes.
return False try: res = socket.getaddrinfo( ip, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST ) return bool(res) except socket.gaierror as e: if e.args[0] == socket.EAI_NONAME: return False raise return True class Resolver(Configurable): """Configurable asynchronous DNS resolver interface. By default, a blocking implementation is used (which simply calls `socket.getaddrinfo`). An alternative implementation can be chosen with the `Resolver.configure <.Configurable.configure>` class method:: Resolver.configure('tornado.netutil.ThreadedResolver') The implementations of this interface included with Tornado are * `tornado.netutil.DefaultExecutorResolver` * `tornado.netutil.BlockingResolver` (deprecated) * `tornado.netutil.ThreadedResolver` (deprecated) * `tornado.netutil.OverrideResolver` * `tornado.platform.twisted.TwistedResolver` * `tornado.platform.caresresolver.CaresResolver` .. versionchanged:: 5.0 The default implementation has changed from `BlockingResolver` to `DefaultExecutorResolver`. """ @classmethod def configurable_base(cls) -> Type["Resolver"]: return Resolver @classmethod def configurable_default(cls) -> Type["Resolver"]: return DefaultExecutorResolver def resolve( self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC ) -> Awaitable[List[Tuple[int, Any]]]: """Resolves an address. The ``host`` argument is a string which may be a hostname or a literal IP address. Returns a `.Future` whose result is a list of (family, address) pairs, where address is a tuple suitable to pass to `socket.connect ` (i.e. a ``(host, port)`` pair for IPv4; additional fields may be present for IPv6). If a ``callback`` is passed, it will be run with the result as an argument when it is complete. :raises IOError: if the address cannot be resolved. .. versionchanged:: 4.4 Standardized all implementations to raise `IOError`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ raise NotImplementedError() def close(self) -> None: """Closes the `Resolver`, freeing any resources used. .. versionadded:: 3.1 """ pass def _resolve_addr( host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC ) -> List[Tuple[int, Any]]: # On Solaris, getaddrinfo fails if the given port is not found # in /etc/services and no socket type is given, so we must pass # one here. The socket type used here doesn't seem to actually # matter (we discard the one we get back in the results), # so the addresses we return should still be usable with SOCK_DGRAM. addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) results = [] for fam, socktype, proto, canonname, address in addrinfo: results.append((fam, address)) return results # type: ignore class DefaultExecutorResolver(Resolver): """Resolver implementation using `.IOLoop.run_in_executor`. .. versionadded:: 5.0 """ async def resolve( self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC ) -> List[Tuple[int, Any]]: result = await IOLoop.current().run_in_executor( None, _resolve_addr, host, port, family ) return result class ExecutorResolver(Resolver): """Resolver implementation using a `concurrent.futures.Executor`. Use this instead of `ThreadedResolver` when you require additional control over the executor being used. The executor will be shut down when the resolver is closed unless ``close_resolver=False``; use this if you want to reuse the same executor elsewhere. .. 
versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. .. deprecated:: 5.0 The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead of this class. """ def initialize( self, executor: Optional[concurrent.futures.Executor] = None, close_executor: bool = True, ) -> None: self.io_loop = IOLoop.current() if executor is not None: self.executor = executor self.close_executor = close_executor else: self.executor = dummy_executor self.close_executor = False def close(self) -> None: if self.close_executor: self.executor.shutdown() self.executor = None # type: ignore @run_on_executor def resolve( self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC ) -> List[Tuple[int, Any]]: return _resolve_addr(host, port, family) class BlockingResolver(ExecutorResolver): """Default `Resolver` implementation, using `socket.getaddrinfo`. The `.IOLoop` will be blocked during the resolution, although the callback will not be run until the next `.IOLoop` iteration. .. deprecated:: 5.0 The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead of this class. """ def initialize(self) -> None: # type: ignore super().initialize() class ThreadedResolver(ExecutorResolver): """Multithreaded non-blocking `Resolver` implementation. Requires the `concurrent.futures` package to be installed (available in the standard library since Python 3.2, installable with ``pip install futures`` in older versions). The thread pool size can be configured with:: Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=10) .. versionchanged:: 3.1 All ``ThreadedResolvers`` share a single thread pool, whose size is set by the first one to be created. .. deprecated:: 5.0 The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead of this class. """ _threadpool = None # type: ignore _threadpool_pid = None # type: int def initialize(self, num_threads: int = 10) -> None: # type: ignore threadpool = ThreadedResolver._create_threadpool(num_threads) super().initialize(executor=threadpool, close_executor=False) @classmethod def _create_threadpool( cls, num_threads: int ) -> concurrent.futures.ThreadPoolExecutor: pid = os.getpid() if cls._threadpool_pid != pid: # Threads cannot survive after a fork, so if our pid isn't what it # was when we created the pool then delete it. cls._threadpool = None if cls._threadpool is None: cls._threadpool = concurrent.futures.ThreadPoolExecutor(num_threads) cls._threadpool_pid = pid return cls._threadpool class OverrideResolver(Resolver): """Wraps a resolver with a mapping of overrides. This can be used to make local DNS changes (e.g. for testing) without modifying system-wide settings. The mapping can be in three formats:: { # Hostname to host or ip "example.com": "127.0.1.1", # Host+port to host+port ("login.example.com", 443): ("localhost", 1443), # Host+port+address family to host+port ("login.example.com", 443, socket.AF_INET6): ("::1", 1443), } .. versionchanged:: 5.0 Added support for host-port-family triplets. 
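A brief usage sketch (the hostname and addresses are illustrative)::

    resolver = OverrideResolver(Resolver(), {"example.com": "127.0.0.1"})
    addrinfo = await resolver.resolve("example.com", 80)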
""" def initialize(self, resolver: Resolver, mapping: dict) -> None: self.resolver = resolver self.mapping = mapping def close(self) -> None: self.resolver.close() def resolve( self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC ) -> Awaitable[List[Tuple[int, Any]]]: if (host, port, family) in self.mapping: host, port = self.mapping[(host, port, family)] elif (host, port) in self.mapping: host, port = self.mapping[(host, port)] elif host in self.mapping: host = self.mapping[host] return self.resolver.resolve(host, port, family) # These are the keyword arguments to ssl.wrap_socket that must be translated # to their SSLContext equivalents (the other arguments are still passed # to SSLContext.wrap_socket). _SSL_CONTEXT_KEYWORDS = frozenset( ["ssl_version", "certfile", "keyfile", "cert_reqs", "ca_certs", "ciphers"] ) def ssl_options_to_context( ssl_options: Union[Dict[str, Any], ssl.SSLContext] ) -> ssl.SSLContext: """Try to convert an ``ssl_options`` dictionary to an `~ssl.SSLContext` object. The ``ssl_options`` dictionary contains keywords to be passed to `ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can be used instead. This function converts the dict form to its `~ssl.SSLContext` equivalent, and may be used when a component which accepts both forms needs to upgrade to the `~ssl.SSLContext` version to use features like SNI or NPN. """ if isinstance(ssl_options, ssl.SSLContext): return ssl_options assert isinstance(ssl_options, dict) assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options # Can't use create_default_context since this interface doesn't # tell us client vs server. context = ssl.SSLContext(ssl_options.get("ssl_version", ssl.PROTOCOL_SSLv23)) if "certfile" in ssl_options: context.load_cert_chain( ssl_options["certfile"], ssl_options.get("keyfile", None) ) if "cert_reqs" in ssl_options: context.verify_mode = ssl_options["cert_reqs"] if "ca_certs" in ssl_options: context.load_verify_locations(ssl_options["ca_certs"]) if "ciphers" in ssl_options: context.set_ciphers(ssl_options["ciphers"]) if hasattr(ssl, "OP_NO_COMPRESSION"): # Disable TLS compression to avoid CRIME and related attacks. # This constant depends on openssl version 1.0. # TODO: Do we need to do this ourselves or can we trust # the defaults? context.options |= ssl.OP_NO_COMPRESSION return context def ssl_wrap_socket( socket: socket.socket, ssl_options: Union[Dict[str, Any], ssl.SSLContext], server_hostname: Optional[str] = None, **kwargs: Any ) -> ssl.SSLSocket: """Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to ``wrap_socket`` (either the `~ssl.SSLContext` method or the `ssl` module function as appropriate). """ context = ssl_options_to_context(ssl_options) if ssl.HAS_SNI: # In python 3.4, wrap_socket only accepts the server_hostname # argument if HAS_SNI is true. 
# TODO: add a unittest (python added server-side SNI support in 3.4) # In the meantime it can be manually tested with # python3 -m tornado.httpclient https://sni.velox.ch return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs) else: return context.wrap_socket(socket, **kwargs) tornado-6.1.0/tornado/options.py000066400000000000000000000620011374705040500167030ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A command line parsing module that lets modules define their own options. This module is inspired by Google's `gflags `_. The primary difference with libraries such as `argparse` is that a global registry is used so that options may be defined in any module (it also enables `tornado.log` by default). The rest of Tornado does not depend on this module, so feel free to use `argparse` or other configuration libraries if you prefer them. Options must be defined with `tornado.options.define` before use, generally at the top level of a module. The options are then accessible as attributes of `tornado.options.options`:: # myapp/db.py from tornado.options import define, options define("mysql_host", default="127.0.0.1:3306", help="Main user DB") define("memcache_hosts", default="127.0.0.1:11011", multiple=True, help="Main user memcache servers") def connect(): db = database.Connection(options.mysql_host) ... # myapp/server.py from tornado.options import define, options define("port", default=8080, help="port to listen on") def start_server(): app = make_app() app.listen(options.port) The ``main()`` method of your application does not need to be aware of all of the options used throughout your program; they are all automatically loaded when the modules are loaded. However, all modules that define options must have been imported before the command line is parsed. Your ``main()`` method can parse the command line or parse a config file with either `parse_command_line` or `parse_config_file`:: import myapp.db, myapp.server import tornado.options if __name__ == '__main__': tornado.options.parse_command_line() # or tornado.options.parse_config_file("/etc/server.conf") .. note:: When using multiple ``parse_*`` functions, pass ``final=False`` to all but the last one, or side effects may occur twice (in particular, this can result in log messages being doubled). `tornado.options.options` is a singleton instance of `OptionParser`, and the top-level functions in this module (`define`, `parse_command_line`, etc) simply call methods on it. You may create additional `OptionParser` instances to define isolated sets of options, such as for subcommands. .. note:: By default, several options are defined that will configure the standard `logging` module when `parse_command_line` or `parse_config_file` are called. 
If you want Tornado to leave the logging configuration alone so you can manage it yourself, either pass ``--logging=none`` on the command line or do the following to disable it in code:: from tornado.options import options, parse_command_line options.logging = None parse_command_line() .. versionchanged:: 4.3 Dashes and underscores are fully interchangeable in option names; options can be defined, set, and read with any mix of the two. Dashes are typical for command-line usage while config files require underscores. """ import datetime import numbers import re import sys import os import textwrap from tornado.escape import _unicode, native_str from tornado.log import define_logging_options from tornado.util import basestring_type, exec_in from typing import ( Any, Iterator, Iterable, Tuple, Set, Dict, Callable, List, TextIO, Optional, ) class Error(Exception): """Exception raised by errors in the options module.""" pass class OptionParser(object): """A collection of options, a dictionary with object-like access. Normally accessed via static functions in the `tornado.options` module, which reference a global instance. """ def __init__(self) -> None: # we have to use self.__dict__ because we override setattr. self.__dict__["_options"] = {} self.__dict__["_parse_callbacks"] = [] self.define( "help", type=bool, help="show this help information", callback=self._help_callback, ) def _normalize_name(self, name: str) -> str: return name.replace("_", "-") def __getattr__(self, name: str) -> Any: name = self._normalize_name(name) if isinstance(self._options.get(name), _Option): return self._options[name].value() raise AttributeError("Unrecognized option %r" % name) def __setattr__(self, name: str, value: Any) -> None: name = self._normalize_name(name) if isinstance(self._options.get(name), _Option): return self._options[name].set(value) raise AttributeError("Unrecognized option %r" % name) def __iter__(self) -> Iterator: return (opt.name for opt in self._options.values()) def __contains__(self, name: str) -> bool: name = self._normalize_name(name) return name in self._options def __getitem__(self, name: str) -> Any: return self.__getattr__(name) def __setitem__(self, name: str, value: Any) -> None: return self.__setattr__(name, value) def items(self) -> Iterable[Tuple[str, Any]]: """An iterable of (name, value) pairs. .. versionadded:: 3.1 """ return [(opt.name, opt.value()) for name, opt in self._options.items()] def groups(self) -> Set[str]: """The set of option-groups created by ``define``. .. versionadded:: 3.1 """ return set(opt.group_name for opt in self._options.values()) def group_dict(self, group: str) -> Dict[str, Any]: """The names and values of options in a group. Useful for copying options into Application settings:: from tornado.options import define, parse_command_line, options define('template_path', group='application') define('static_path', group='application') parse_command_line() application = Application( handlers, **options.group_dict('application')) .. versionadded:: 3.1 """ return dict( (opt.name, opt.value()) for name, opt in self._options.items() if not group or group == opt.group_name ) def as_dict(self) -> Dict[str, Any]: """The names and values of all options. .. 
versionadded:: 3.1 """ return dict((opt.name, opt.value()) for name, opt in self._options.items()) def define( self, name: str, default: Any = None, type: Optional[type] = None, help: Optional[str] = None, metavar: Optional[str] = None, multiple: bool = False, group: Optional[str] = None, callback: Optional[Callable[[Any], None]] = None, ) -> None: """Defines a new command line option. ``type`` can be any of `str`, `int`, `float`, `bool`, `~datetime.datetime`, or `~datetime.timedelta`. If no ``type`` is given but a ``default`` is, ``type`` is the type of ``default``. Otherwise, ``type`` defaults to `str`. If ``multiple`` is True, the option value is a list of ``type`` instead of an instance of ``type``. ``help`` and ``metavar`` are used to construct the automatically generated command line help string. The help message is formatted like:: --name=METAVAR help string ``group`` is used to group the defined options in logical groups. By default, command line options are grouped by the file in which they are defined. Command line option names must be unique globally. If a ``callback`` is given, it will be run with the new value whenever the option is changed. This can be used to combine command-line and file-based options:: define("config", type=str, help="path to config file", callback=lambda path: parse_config_file(path, final=False)) With this definition, options in the file specified by ``--config`` will override options set earlier on the command line, but can be overridden by later flags. """ normalized = self._normalize_name(name) if normalized in self._options: raise Error( "Option %r already defined in %s" % (normalized, self._options[normalized].file_name) ) frame = sys._getframe(0) options_file = frame.f_code.co_filename # Can be called directly, or through top level define() fn, in which # case, step up above that frame to look for real caller. if ( frame.f_back.f_code.co_filename == options_file and frame.f_back.f_code.co_name == "define" ): frame = frame.f_back file_name = frame.f_back.f_code.co_filename if file_name == options_file: file_name = "" if type is None: if not multiple and default is not None: type = default.__class__ else: type = str if group: group_name = group # type: Optional[str] else: group_name = file_name option = _Option( name, file_name=file_name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, group_name=group_name, callback=callback, ) self._options[normalized] = option def parse_command_line( self, args: Optional[List[str]] = None, final: bool = True ) -> List[str]: """Parses all options given on the command line (defaults to `sys.argv`). Options look like ``--option=value`` and are parsed according to their ``type``. For boolean options, ``--option`` is equivalent to ``--option=true`` If the option has ``multiple=True``, comma-separated values are accepted. For multi-value integer options, the syntax ``x:y`` is also accepted and equivalent to ``range(x, y)``. Note that ``args[0]`` is ignored since it is the program name in `sys.argv`. We return a list of all arguments that are not parsed as options. If ``final`` is ``False``, parse callbacks will not be run. This is useful for applications that wish to combine configurations from multiple sources. 
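A short illustrative example (assuming a ``port`` option has already been defined)::

    remaining = options.parse_command_line(
        ["prog", "--port=8080", "positional_arg"])
    # remaining == ["positional_arg"]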
""" if args is None: args = sys.argv remaining = [] # type: List[str] for i in range(1, len(args)): # All things after the last option are command line arguments if not args[i].startswith("-"): remaining = args[i:] break if args[i] == "--": remaining = args[i + 1 :] break arg = args[i].lstrip("-") name, equals, value = arg.partition("=") name = self._normalize_name(name) if name not in self._options: self.print_help() raise Error("Unrecognized command line option: %r" % name) option = self._options[name] if not equals: if option.type == bool: value = "true" else: raise Error("Option %r requires a value" % name) option.parse(value) if final: self.run_parse_callbacks() return remaining def parse_config_file(self, path: str, final: bool = True) -> None: """Parses and loads the config file at the given path. The config file contains Python code that will be executed (so it is **not safe** to use untrusted config files). Anything in the global namespace that matches a defined option will be used to set that option's value. Options may either be the specified type for the option or strings (in which case they will be parsed the same way as in `.parse_command_line`) Example (using the options defined in the top-level docs of this module):: port = 80 mysql_host = 'mydb.example.com:3306' # Both lists and comma-separated strings are allowed for # multiple=True. memcache_hosts = ['cache1.example.com:11011', 'cache2.example.com:11011'] memcache_hosts = 'cache1.example.com:11011,cache2.example.com:11011' If ``final`` is ``False``, parse callbacks will not be run. This is useful for applications that wish to combine configurations from multiple sources. .. note:: `tornado.options` is primarily a command-line library. Config file support is provided for applications that wish to use it, but applications that prefer config files may wish to look at other libraries instead. .. versionchanged:: 4.1 Config files are now always interpreted as utf-8 instead of the system default encoding. .. versionchanged:: 4.4 The special variable ``__file__`` is available inside config files, specifying the absolute path to the config file itself. .. versionchanged:: 5.1 Added the ability to set options via strings in config files. """ config = {"__file__": os.path.abspath(path)} with open(path, "rb") as f: exec_in(native_str(f.read()), config, config) for name in config: normalized = self._normalize_name(name) if normalized in self._options: option = self._options[normalized] if option.multiple: if not isinstance(config[name], (list, str)): raise Error( "Option %r is required to be a list of %s " "or a comma-separated string" % (option.name, option.type.__name__) ) if type(config[name]) == str and option.type != str: option.parse(config[name]) else: option.set(config[name]) if final: self.run_parse_callbacks() def print_help(self, file: Optional[TextIO] = None) -> None: """Prints all the command line options to stderr (or another file).""" if file is None: file = sys.stderr print("Usage: %s [OPTIONS]" % sys.argv[0], file=file) print("\nOptions:\n", file=file) by_group = {} # type: Dict[str, List[_Option]] for option in self._options.values(): by_group.setdefault(option.group_name, []).append(option) for filename, o in sorted(by_group.items()): if filename: print("\n%s options:\n" % os.path.normpath(filename), file=file) o.sort(key=lambda option: option.name) for option in o: # Always print names with dashes in a CLI context. 
prefix = self._normalize_name(option.name) if option.metavar: prefix += "=" + option.metavar description = option.help or "" if option.default is not None and option.default != "": description += " (default %s)" % option.default lines = textwrap.wrap(description, 79 - 35) if len(prefix) > 30 or len(lines) == 0: lines.insert(0, "") print(" --%-30s %s" % (prefix, lines[0]), file=file) for line in lines[1:]: print("%-34s %s" % (" ", line), file=file) print(file=file) def _help_callback(self, value: bool) -> None: if value: self.print_help() sys.exit(0) def add_parse_callback(self, callback: Callable[[], None]) -> None: """Adds a parse callback, to be invoked when option parsing is done.""" self._parse_callbacks.append(callback) def run_parse_callbacks(self) -> None: for callback in self._parse_callbacks: callback() def mockable(self) -> "_Mockable": """Returns a wrapper around self that is compatible with `mock.patch `. The `mock.patch ` function (included in the standard library `unittest.mock` package since Python 3.3, or in the third-party ``mock`` package for older versions of Python) is incompatible with objects like ``options`` that override ``__getattr__`` and ``__setattr__``. This function returns an object that can be used with `mock.patch.object ` to modify option values:: with mock.patch.object(options.mockable(), 'name', value): assert options.name == value """ return _Mockable(self) class _Mockable(object): """`mock.patch` compatible wrapper for `OptionParser`. As of ``mock`` version 1.0.1, when an object uses ``__getattr__`` hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete the attribute it set instead of setting a new one (assuming that the object does not capture ``__setattr__``, so the patch created a new attribute in ``__dict__``). _Mockable's getattr and setattr pass through to the underlying OptionParser, and delattr undoes the effect of a previous setattr. """ def __init__(self, options: OptionParser) -> None: # Modify __dict__ directly to bypass __setattr__ self.__dict__["_options"] = options self.__dict__["_originals"] = {} def __getattr__(self, name: str) -> Any: return getattr(self._options, name) def __setattr__(self, name: str, value: Any) -> None: assert name not in self._originals, "don't reuse mockable objects" self._originals[name] = getattr(self._options, name) setattr(self._options, name, value) def __delattr__(self, name: str) -> None: setattr(self._options, name, self._originals.pop(name)) class _Option(object): # This class could almost be made generic, but the way the types # interact with the multiple argument makes this tricky. (default # and the callback use List[T], but type is still Type[T]). 
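# Sentinel used below to distinguish "option never set" from an explicit
# value of None.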
UNSET = object() def __init__( self, name: str, default: Any = None, type: Optional[type] = None, help: Optional[str] = None, metavar: Optional[str] = None, multiple: bool = False, file_name: Optional[str] = None, group_name: Optional[str] = None, callback: Optional[Callable[[Any], None]] = None, ) -> None: if default is None and multiple: default = [] self.name = name if type is None: raise ValueError("type must not be None") self.type = type self.help = help self.metavar = metavar self.multiple = multiple self.file_name = file_name self.group_name = group_name self.callback = callback self.default = default self._value = _Option.UNSET # type: Any def value(self) -> Any: return self.default if self._value is _Option.UNSET else self._value def parse(self, value: str) -> Any: _parse = { datetime.datetime: self._parse_datetime, datetime.timedelta: self._parse_timedelta, bool: self._parse_bool, basestring_type: self._parse_string, }.get( self.type, self.type ) # type: Callable[[str], Any] if self.multiple: self._value = [] for part in value.split(","): if issubclass(self.type, numbers.Integral): # allow ranges of the form X:Y (inclusive at both ends) lo_str, _, hi_str = part.partition(":") lo = _parse(lo_str) hi = _parse(hi_str) if hi_str else lo self._value.extend(range(lo, hi + 1)) else: self._value.append(_parse(part)) else: self._value = _parse(value) if self.callback is not None: self.callback(self._value) return self.value() def set(self, value: Any) -> None: if self.multiple: if not isinstance(value, list): raise Error( "Option %r is required to be a list of %s" % (self.name, self.type.__name__) ) for item in value: if item is not None and not isinstance(item, self.type): raise Error( "Option %r is required to be a list of %s" % (self.name, self.type.__name__) ) else: if value is not None and not isinstance(value, self.type): raise Error( "Option %r is required to be a %s (%s given)" % (self.name, self.type.__name__, type(value)) ) self._value = value if self.callback is not None: self.callback(self._value) # Supported date/time formats in our options _DATETIME_FORMATS = [ "%a %b %d %H:%M:%S %Y", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%dT%H:%M", "%Y%m%d %H:%M:%S", "%Y%m%d %H:%M", "%Y-%m-%d", "%Y%m%d", "%H:%M:%S", "%H:%M", ] def _parse_datetime(self, value: str) -> datetime.datetime: for format in self._DATETIME_FORMATS: try: return datetime.datetime.strptime(value, format) except ValueError: pass raise Error("Unrecognized date/time format: %r" % value) _TIMEDELTA_ABBREV_DICT = { "h": "hours", "m": "minutes", "min": "minutes", "s": "seconds", "sec": "seconds", "ms": "milliseconds", "us": "microseconds", "d": "days", "w": "weeks", } _FLOAT_PATTERN = r"[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?" _TIMEDELTA_PATTERN = re.compile( r"\s*(%s)\s*(\w*)\s*" % _FLOAT_PATTERN, re.IGNORECASE ) def _parse_timedelta(self, value: str) -> datetime.timedelta: try: sum = datetime.timedelta() start = 0 while start < len(value): m = self._TIMEDELTA_PATTERN.match(value, start) if not m: raise Exception() num = float(m.group(1)) units = m.group(2) or "seconds" units = self._TIMEDELTA_ABBREV_DICT.get(units, units) sum += datetime.timedelta(**{units: num}) start = m.end() return sum except Exception: raise def _parse_bool(self, value: str) -> bool: return value.lower() not in ("false", "0", "f") def _parse_string(self, value: str) -> str: return _unicode(value) options = OptionParser() """Global options object. All defined options are available as attributes on this object. 
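For example (an illustrative sketch, assuming an option ``port`` has been defined)::

    print(options.port)       # attribute-style access
    print(options["port"])    # dict-style access
    print(options.as_dict())  # all options as a plain dict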
""" def define( name: str, default: Any = None, type: Optional[type] = None, help: Optional[str] = None, metavar: Optional[str] = None, multiple: bool = False, group: Optional[str] = None, callback: Optional[Callable[[Any], None]] = None, ) -> None: """Defines an option in the global namespace. See `OptionParser.define`. """ return options.define( name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, group=group, callback=callback, ) def parse_command_line( args: Optional[List[str]] = None, final: bool = True ) -> List[str]: """Parses global options from the command line. See `OptionParser.parse_command_line`. """ return options.parse_command_line(args, final=final) def parse_config_file(path: str, final: bool = True) -> None: """Parses global options from a config file. See `OptionParser.parse_config_file`. """ return options.parse_config_file(path, final=final) def print_help(file: Optional[TextIO] = None) -> None: """Prints all the command line options to stderr (or another file). See `OptionParser.print_help`. """ return options.print_help(file) def add_parse_callback(callback: Callable[[], None]) -> None: """Adds a parse callback, to be invoked when option parsing is done. See `OptionParser.add_parse_callback` """ options.add_parse_callback(callback) # Default options define_logging_options(options) tornado-6.1.0/tornado/platform/000077500000000000000000000000001374705040500164635ustar00rootroot00000000000000tornado-6.1.0/tornado/platform/__init__.py000066400000000000000000000000001374705040500205620ustar00rootroot00000000000000tornado-6.1.0/tornado/platform/asyncio.py000066400000000000000000000552601374705040500205120ustar00rootroot00000000000000"""Bridges between the `asyncio` module and Tornado IOLoop. .. versionadded:: 3.2 This module integrates Tornado with the ``asyncio`` module introduced in Python 3.4. This makes it possible to combine the two libraries on the same event loop. .. deprecated:: 5.0 While the code in this module is still used, it is now enabled automatically when `asyncio` is available, so applications should no longer need to refer to this module directly. .. note:: Tornado is designed to use a selector-based event loop. On Windows, where a proactor-based event loop has been the default since Python 3.8, a selector event loop is emulated by running ``select`` on a separate thread. Configuring ``asyncio`` to use a selector event loop may improve performance of Tornado (but may reduce performance of other ``asyncio``-based libraries in the same process). """ import asyncio import atexit import concurrent.futures import errno import functools import select import socket import sys import threading import typing from tornado.gen import convert_yielded from tornado.ioloop import IOLoop, _Selectable from typing import Any, TypeVar, Awaitable, Callable, Union, Optional, List, Tuple, Dict if typing.TYPE_CHECKING: from typing import Set # noqa: F401 from typing_extensions import Protocol class _HasFileno(Protocol): def fileno(self) -> int: pass _FileDescriptorLike = Union[int, _HasFileno] _T = TypeVar("_T") # Collection of selector thread event loops to shut down on exit. _selector_loops = set() # type: Set[AddThreadSelectorEventLoop] def _atexit_callback() -> None: for loop in _selector_loops: with loop._select_cond: loop._closing_selector = True loop._select_cond.notify() try: loop._waker_w.send(b"a") except BlockingIOError: pass # If we don't join our (daemon) thread here, we may get a deadlock # during interpreter shutdown. 
I don't really understand why. This # deadlock happens every time in CI (both travis and appveyor) but # I've never been able to reproduce locally. loop._thread.join() _selector_loops.clear() atexit.register(_atexit_callback) class BaseAsyncIOLoop(IOLoop): def initialize( # type: ignore self, asyncio_loop: asyncio.AbstractEventLoop, **kwargs: Any ) -> None: # asyncio_loop is always the real underlying IOLoop. This is used in # ioloop.py to maintain the asyncio-to-ioloop mappings. self.asyncio_loop = asyncio_loop # selector_loop is an event loop that implements the add_reader family of # methods. Usually the same as asyncio_loop but differs on platforms such # as windows where the default event loop does not implement these methods. self.selector_loop = asyncio_loop if hasattr(asyncio, "ProactorEventLoop") and isinstance( asyncio_loop, asyncio.ProactorEventLoop # type: ignore ): # Ignore this line for mypy because the abstract method checker # doesn't understand dynamic proxies. self.selector_loop = AddThreadSelectorEventLoop(asyncio_loop) # type: ignore # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) self.handlers = {} # type: Dict[int, Tuple[Union[int, _Selectable], Callable]] # Set of fds listening for reads/writes self.readers = set() # type: Set[int] self.writers = set() # type: Set[int] self.closing = False # If an asyncio loop was closed through an asyncio interface # instead of IOLoop.close(), we'd never hear about it and may # have left a dangling reference in our map. In case an # application (or, more likely, a test suite) creates and # destroys a lot of event loops in this way, check here to # ensure that we don't have a lot of dead loops building up in # the map. # # TODO(bdarnell): consider making self.asyncio_loop a weakref # for AsyncIOMainLoop and make _ioloop_for_asyncio a # WeakKeyDictionary. for loop in list(IOLoop._ioloop_for_asyncio): if loop.is_closed(): del IOLoop._ioloop_for_asyncio[loop] IOLoop._ioloop_for_asyncio[asyncio_loop] = self self._thread_identity = 0 super().initialize(**kwargs) def assign_thread_identity() -> None: self._thread_identity = threading.get_ident() self.add_callback(assign_thread_identity) def close(self, all_fds: bool = False) -> None: self.closing = True for fd in list(self.handlers): fileobj, handler_func = self.handlers[fd] self.remove_handler(fd) if all_fds: self.close_fd(fileobj) # Remove the mapping before closing the asyncio loop. If this # happened in the other order, we could race against another # initialize() call which would see the closed asyncio loop, # assume it was closed from the asyncio side, and do this # cleanup for us, leading to a KeyError. 
del IOLoop._ioloop_for_asyncio[self.asyncio_loop] if self.selector_loop is not self.asyncio_loop: self.selector_loop.close() self.asyncio_loop.close() def add_handler( self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int ) -> None: fd, fileobj = self.split_fd(fd) if fd in self.handlers: raise ValueError("fd %s added twice" % fd) self.handlers[fd] = (fileobj, handler) if events & IOLoop.READ: self.selector_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ) self.readers.add(fd) if events & IOLoop.WRITE: self.selector_loop.add_writer(fd, self._handle_events, fd, IOLoop.WRITE) self.writers.add(fd) def update_handler(self, fd: Union[int, _Selectable], events: int) -> None: fd, fileobj = self.split_fd(fd) if events & IOLoop.READ: if fd not in self.readers: self.selector_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ) self.readers.add(fd) else: if fd in self.readers: self.selector_loop.remove_reader(fd) self.readers.remove(fd) if events & IOLoop.WRITE: if fd not in self.writers: self.selector_loop.add_writer(fd, self._handle_events, fd, IOLoop.WRITE) self.writers.add(fd) else: if fd in self.writers: self.selector_loop.remove_writer(fd) self.writers.remove(fd) def remove_handler(self, fd: Union[int, _Selectable]) -> None: fd, fileobj = self.split_fd(fd) if fd not in self.handlers: return if fd in self.readers: self.selector_loop.remove_reader(fd) self.readers.remove(fd) if fd in self.writers: self.selector_loop.remove_writer(fd) self.writers.remove(fd) del self.handlers[fd] def _handle_events(self, fd: int, events: int) -> None: fileobj, handler_func = self.handlers[fd] handler_func(fileobj, events) def start(self) -> None: try: old_loop = asyncio.get_event_loop() except (RuntimeError, AssertionError): old_loop = None # type: ignore try: self._setup_logging() asyncio.set_event_loop(self.asyncio_loop) self.asyncio_loop.run_forever() finally: asyncio.set_event_loop(old_loop) def stop(self) -> None: self.asyncio_loop.stop() def call_at( self, when: float, callback: Callable[..., None], *args: Any, **kwargs: Any ) -> object: # asyncio.call_at supports *args but not **kwargs, so bind them here. # We do not synchronize self.time and asyncio_loop.time, so # convert from absolute to relative. return self.asyncio_loop.call_later( max(0, when - self.time()), self._run_callback, functools.partial(callback, *args, **kwargs), ) def remove_timeout(self, timeout: object) -> None: timeout.cancel() # type: ignore def add_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None: if threading.get_ident() == self._thread_identity: call_soon = self.asyncio_loop.call_soon else: call_soon = self.asyncio_loop.call_soon_threadsafe try: call_soon(self._run_callback, functools.partial(callback, *args, **kwargs)) except RuntimeError: # "Event loop is closed". Swallow the exception for # consistency with PollIOLoop (and logical consistency # with the fact that we can't guarantee that an # add_callback that completes without error will # eventually execute). pass except AttributeError: # ProactorEventLoop may raise this instead of RuntimeError # if call_soon_threadsafe races with a call to close(). # Swallow it too for consistency. 
pass def add_callback_from_signal( self, callback: Callable, *args: Any, **kwargs: Any ) -> None: try: self.asyncio_loop.call_soon_threadsafe( self._run_callback, functools.partial(callback, *args, **kwargs) ) except RuntimeError: pass def run_in_executor( self, executor: Optional[concurrent.futures.Executor], func: Callable[..., _T], *args: Any ) -> Awaitable[_T]: return self.asyncio_loop.run_in_executor(executor, func, *args) def set_default_executor(self, executor: concurrent.futures.Executor) -> None: return self.asyncio_loop.set_default_executor(executor) class AsyncIOMainLoop(BaseAsyncIOLoop): """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the current ``asyncio`` event loop (i.e. the one returned by ``asyncio.get_event_loop()``). .. deprecated:: 5.0 Now used automatically when appropriate; it is no longer necessary to refer to this class directly. .. versionchanged:: 5.0 Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop. """ def initialize(self, **kwargs: Any) -> None: # type: ignore super().initialize(asyncio.get_event_loop(), **kwargs) def make_current(self) -> None: # AsyncIOMainLoop already refers to the current asyncio loop so # nothing to do here. pass class AsyncIOLoop(BaseAsyncIOLoop): """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop. This class follows the usual Tornado semantics for creating new ``IOLoops``; these loops are not necessarily related to the ``asyncio`` default event loop. Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object can be accessed with the ``asyncio_loop`` attribute. .. versionchanged:: 5.0 When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets the current `asyncio` event loop. .. deprecated:: 5.0 Now used automatically when appropriate; it is no longer necessary to refer to this class directly. """ def initialize(self, **kwargs: Any) -> None: # type: ignore self.is_current = False loop = asyncio.new_event_loop() try: super().initialize(loop, **kwargs) except Exception: # If initialize() does not succeed (taking ownership of the loop), # we have to close it. loop.close() raise def close(self, all_fds: bool = False) -> None: if self.is_current: self.clear_current() super().close(all_fds=all_fds) def make_current(self) -> None: if not self.is_current: try: self.old_asyncio = asyncio.get_event_loop() except (RuntimeError, AssertionError): self.old_asyncio = None # type: ignore self.is_current = True asyncio.set_event_loop(self.asyncio_loop) def _clear_current_hook(self) -> None: if self.is_current: asyncio.set_event_loop(self.old_asyncio) self.is_current = False def to_tornado_future(asyncio_future: asyncio.Future) -> asyncio.Future: """Convert an `asyncio.Future` to a `tornado.concurrent.Future`. .. versionadded:: 4.1 .. deprecated:: 5.0 Tornado ``Futures`` have been merged with `asyncio.Future`, so this method is now a no-op. """ return asyncio_future def to_asyncio_future(tornado_future: asyncio.Future) -> asyncio.Future: """Convert a Tornado yieldable object to an `asyncio.Future`. .. versionadded:: 4.1 .. versionchanged:: 4.3 Now accepts any yieldable object, not just `tornado.concurrent.Future`. .. deprecated:: 5.0 Tornado ``Futures`` have been merged with `asyncio.Future`, so this method is now equivalent to `tornado.gen.convert_yielded`. 
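An illustrative sketch (``fetch_data`` is hypothetical)::

    async def fetch_data():
        return 42

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(to_asyncio_future(fetch_data())))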
""" return convert_yielded(tornado_future) if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): # "Any thread" and "selector" should be orthogonal, but there's not a clean # interface for composing policies so pick the right base. _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore else: _BasePolicy = asyncio.DefaultEventLoopPolicy class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore """Event loop policy that allows loop creation on any thread. The default `asyncio` event loop policy only automatically creates event loops in the main threads. Other threads must create event loops explicitly or `asyncio.get_event_loop` (and therefore `.IOLoop.current`) will fail. Installing this policy allows event loops to be created automatically on any thread, matching the behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). Usage:: asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) .. versionadded:: 5.0 """ def get_event_loop(self) -> asyncio.AbstractEventLoop: try: return super().get_event_loop() except (RuntimeError, AssertionError): # This was an AssertionError in Python 3.4.2 (which ships with Debian Jessie) # and changed to a RuntimeError in 3.4.3. # "There is no current event loop in thread %r" loop = self.new_event_loop() self.set_event_loop(loop) return loop class AddThreadSelectorEventLoop(asyncio.AbstractEventLoop): """Wrap an event loop to add implementations of the ``add_reader`` method family. Instances of this class start a second thread to run a selector. This thread is completely hidden from the user; all callbacks are run on the wrapped event loop's thread. This class is used automatically by Tornado; applications should not need to refer to it directly. It is safe to wrap any event loop with this class, although it only makes sense for event loops that do not implement the ``add_reader`` family of methods themselves (i.e. ``WindowsProactorEventLoop``) Closing the ``AddThreadSelectorEventLoop`` also closes the wrapped event loop. """ # This class is a __getattribute__-based proxy. All attributes other than those # in this set are proxied through to the underlying loop. MY_ATTRIBUTES = { "_consume_waker", "_select_cond", "_select_args", "_closing_selector", "_thread", "_handle_event", "_readers", "_real_loop", "_start_select", "_run_select", "_handle_select", "_wake_selector", "_waker_r", "_waker_w", "_writers", "add_reader", "add_writer", "close", "remove_reader", "remove_writer", } def __getattribute__(self, name: str) -> Any: if name in AddThreadSelectorEventLoop.MY_ATTRIBUTES: return super().__getattribute__(name) return getattr(self._real_loop, name) def __init__(self, real_loop: asyncio.AbstractEventLoop) -> None: self._real_loop = real_loop # Create a thread to run the select system call. We manage this thread # manually so we can trigger a clean shutdown from an atexit hook. Note # that due to the order of operations at shutdown, only daemon threads # can be shut down in this way (non-daemon threads would require the # introduction of a new hook: https://bugs.python.org/issue41962) self._select_cond = threading.Condition() self._select_args = ( None ) # type: Optional[Tuple[List[_FileDescriptorLike], List[_FileDescriptorLike]]] self._closing_selector = False self._thread = threading.Thread( name="Tornado selector", daemon=True, target=self._run_select, ) self._thread.start() # Start the select loop once the loop is started. 
self._real_loop.call_soon(self._start_select) self._readers = {} # type: Dict[_FileDescriptorLike, Callable] self._writers = {} # type: Dict[_FileDescriptorLike, Callable] # Writing to _waker_w will wake up the selector thread, which # watches for _waker_r to be readable. self._waker_r, self._waker_w = socket.socketpair() self._waker_r.setblocking(False) self._waker_w.setblocking(False) _selector_loops.add(self) self.add_reader(self._waker_r, self._consume_waker) def __del__(self) -> None: # If the top-level application code uses asyncio interfaces to # start and stop the event loop, no objects created in Tornado # can get a clean shutdown notification. If we're just left to # be GC'd, we must explicitly close our sockets to avoid # logging warnings. _selector_loops.discard(self) self._waker_r.close() self._waker_w.close() def close(self) -> None: with self._select_cond: self._closing_selector = True self._select_cond.notify() self._wake_selector() self._thread.join() _selector_loops.discard(self) self._waker_r.close() self._waker_w.close() self._real_loop.close() def _wake_selector(self) -> None: try: self._waker_w.send(b"a") except BlockingIOError: pass def _consume_waker(self) -> None: try: self._waker_r.recv(1024) except BlockingIOError: pass def _start_select(self) -> None: # Capture reader and writer sets here in the event loop # thread to avoid any problems with concurrent # modification while the select loop uses them. with self._select_cond: assert self._select_args is None self._select_args = (list(self._readers.keys()), list(self._writers.keys())) self._select_cond.notify() def _run_select(self) -> None: while True: with self._select_cond: while self._select_args is None and not self._closing_selector: self._select_cond.wait() if self._closing_selector: return assert self._select_args is not None to_read, to_write = self._select_args self._select_args = None # We use the simpler interface of the select module instead of # the more stateful interface in the selectors module because # this class is only intended for use on windows, where # select.select is the only option. The selector interface # does not have well-documented thread-safety semantics that # we can rely on so ensuring proper synchronization would be # tricky. try: # On windows, selecting on a socket for write will not # return the socket when there is an error (but selecting # for reads works). Also select for errors when selecting # for writes, and merge the results. # # This pattern is also used in # https://github.com/python/cpython/blob/v3.8.0/Lib/selectors.py#L312-L317 rs, ws, xs = select.select(to_read, to_write, to_write) ws = ws + xs except OSError as e: # After remove_reader or remove_writer is called, the file # descriptor may subsequently be closed on the event loop # thread. It's possible that this select thread hasn't # gotten into the select system call by the time that # happens in which case (at least on macOS), select may # raise a "bad file descriptor" error. If we get that # error, check and see if we're also being woken up by # polling the waker alone. If we are, just return to the # event loop and we'll get the updated set of file # descriptors on the next iteration. Otherwise, raise the # original error. 
if e.errno == getattr(errno, "WSAENOTSOCK", errno.EBADF): rs, _, _ = select.select([self._waker_r.fileno()], [], [], 0) if rs: ws = [] else: raise else: raise self._real_loop.call_soon_threadsafe(self._handle_select, rs, ws) def _handle_select( self, rs: List["_FileDescriptorLike"], ws: List["_FileDescriptorLike"] ) -> None: for r in rs: self._handle_event(r, self._readers) for w in ws: self._handle_event(w, self._writers) self._start_select() def _handle_event( self, fd: "_FileDescriptorLike", cb_map: Dict["_FileDescriptorLike", Callable], ) -> None: try: callback = cb_map[fd] except KeyError: return callback() def add_reader( self, fd: "_FileDescriptorLike", callback: Callable[..., None], *args: Any ) -> None: self._readers[fd] = functools.partial(callback, *args) self._wake_selector() def add_writer( self, fd: "_FileDescriptorLike", callback: Callable[..., None], *args: Any ) -> None: self._writers[fd] = functools.partial(callback, *args) self._wake_selector() def remove_reader(self, fd: "_FileDescriptorLike") -> None: del self._readers[fd] self._wake_selector() def remove_writer(self, fd: "_FileDescriptorLike") -> None: del self._writers[fd] self._wake_selector() tornado-6.1.0/tornado/platform/caresresolver.py000066400000000000000000000063661374705040500217270ustar00rootroot00000000000000import pycares # type: ignore import socket from tornado.concurrent import Future from tornado import gen from tornado.ioloop import IOLoop from tornado.netutil import Resolver, is_valid_ip import typing if typing.TYPE_CHECKING: from typing import Generator, Any, List, Tuple, Dict # noqa: F401 class CaresResolver(Resolver): """Name resolver based on the c-ares library. This is a non-blocking and non-threaded resolver. It may not produce the same results as the system resolver, but can be used for non-blocking resolution when threads cannot be used. c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is the default for ``tornado.simple_httpclient``, but other libraries may default to ``AF_UNSPEC``. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. 
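To select this implementation (a sketch following the ``Resolver.configure`` pattern documented in ``tornado.netutil``)::

    from tornado.netutil import Resolver
    Resolver.configure('tornado.platform.caresresolver.CaresResolver')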
""" def initialize(self) -> None: self.io_loop = IOLoop.current() self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb) self.fds = {} # type: Dict[int, int] def _sock_state_cb(self, fd: int, readable: bool, writable: bool) -> None: state = (IOLoop.READ if readable else 0) | (IOLoop.WRITE if writable else 0) if not state: self.io_loop.remove_handler(fd) del self.fds[fd] elif fd in self.fds: self.io_loop.update_handler(fd, state) self.fds[fd] = state else: self.io_loop.add_handler(fd, self._handle_events, state) self.fds[fd] = state def _handle_events(self, fd: int, events: int) -> None: read_fd = pycares.ARES_SOCKET_BAD write_fd = pycares.ARES_SOCKET_BAD if events & IOLoop.READ: read_fd = fd if events & IOLoop.WRITE: write_fd = fd self.channel.process_fd(read_fd, write_fd) @gen.coroutine def resolve( self, host: str, port: int, family: int = 0 ) -> "Generator[Any, Any, List[Tuple[int, Any]]]": if is_valid_ip(host): addresses = [host] else: # gethostbyname doesn't take callback as a kwarg fut = Future() # type: Future[Tuple[Any, Any]] self.channel.gethostbyname( host, family, lambda result, error: fut.set_result((result, error)) ) result, error = yield fut if error: raise IOError( "C-Ares returned error %s: %s while resolving %s" % (error, pycares.errno.strerror(error), host) ) addresses = result.addresses addrinfo = [] for address in addresses: if "." in address: address_family = socket.AF_INET elif ":" in address: address_family = socket.AF_INET6 else: address_family = socket.AF_UNSPEC if family != socket.AF_UNSPEC and family != address_family: raise IOError( "Requested socket family %d but got %d" % (family, address_family) ) addrinfo.append((typing.cast(int, address_family), (address, port))) return addrinfo tornado-6.1.0/tornado/platform/twisted.py000066400000000000000000000125451374705040500205270ustar00rootroot00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Bridges between the Twisted package and Tornado. """ import socket import sys import twisted.internet.abstract # type: ignore import twisted.internet.asyncioreactor # type: ignore from twisted.internet.defer import Deferred # type: ignore from twisted.python import failure # type: ignore import twisted.names.cache # type: ignore import twisted.names.client # type: ignore import twisted.names.hosts # type: ignore import twisted.names.resolve # type: ignore from tornado.concurrent import Future, future_set_exc_info from tornado.escape import utf8 from tornado import gen from tornado.netutil import Resolver import typing if typing.TYPE_CHECKING: from typing import Generator, Any, List, Tuple # noqa: F401 class TwistedResolver(Resolver): """Twisted-based asynchronous resolver. This is a non-blocking and non-threaded resolver. It is recommended only when threads cannot be used, since it has limitations compared to the standard ``getaddrinfo``-based `~tornado.netutil.Resolver` and `~tornado.netutil.DefaultExecutorResolver`. 
Specifically, it returns at most one result, and arguments other than ``host`` and ``family`` are ignored. It may fail to resolve when ``family`` is not ``socket.AF_UNSPEC``. Requires Twisted 12.1 or newer. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ def initialize(self) -> None: # partial copy of twisted.names.client.createResolver, which doesn't # allow for a reactor to be passed in. self.reactor = twisted.internet.asyncioreactor.AsyncioSelectorReactor() host_resolver = twisted.names.hosts.Resolver("/etc/hosts") cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor) real_resolver = twisted.names.client.Resolver( "/etc/resolv.conf", reactor=self.reactor ) self.resolver = twisted.names.resolve.ResolverChain( [host_resolver, cache_resolver, real_resolver] ) @gen.coroutine def resolve( self, host: str, port: int, family: int = 0 ) -> "Generator[Any, Any, List[Tuple[int, Any]]]": # getHostByName doesn't accept IP addresses, so if the input # looks like an IP address just return it immediately. if twisted.internet.abstract.isIPAddress(host): resolved = host resolved_family = socket.AF_INET elif twisted.internet.abstract.isIPv6Address(host): resolved = host resolved_family = socket.AF_INET6 else: deferred = self.resolver.getHostByName(utf8(host)) fut = Future() # type: Future[Any] deferred.addBoth(fut.set_result) resolved = yield fut if isinstance(resolved, failure.Failure): try: resolved.raiseException() except twisted.names.error.DomainError as e: raise IOError(e) elif twisted.internet.abstract.isIPAddress(resolved): resolved_family = socket.AF_INET elif twisted.internet.abstract.isIPv6Address(resolved): resolved_family = socket.AF_INET6 else: resolved_family = socket.AF_UNSPEC if family != socket.AF_UNSPEC and family != resolved_family: raise Exception( "Requested socket family %d but got %d" % (family, resolved_family) ) result = [(typing.cast(int, resolved_family), (resolved, port))] return result def install() -> None: """Install ``AsyncioSelectorReactor`` as the default Twisted reactor. .. deprecated:: 5.1 This function is provided for backwards compatibility; code that does not require compatibility with older versions of Tornado should use ``twisted.internet.asyncioreactor.install()`` directly. .. versionchanged:: 6.0.3 In Tornado 5.x and before, this function installed a reactor based on the Tornado ``IOLoop``. When that reactor implementation was removed in Tornado 6.0.0, this function was removed as well. It was restored in Tornado 6.0.3 using the ``asyncio`` reactor instead. """ from twisted.internet.asyncioreactor import install install() if hasattr(gen.convert_yielded, "register"): @gen.convert_yielded.register(Deferred) # type: ignore def _(d: Deferred) -> Future: f = Future() # type: Future[Any] def errback(failure: failure.Failure) -> None: try: failure.raiseException() # Should never happen, but just in case raise Exception("errback called without error") except: future_set_exc_info(f, sys.exc_info()) d.addCallbacks(f.set_result, errback) return f tornado-6.1.0/tornado/process.py000066400000000000000000000307651374705040500167020ustar00rootroot00000000000000# # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities for working with multiple processes, including both forking the server into multiple processes and managing subprocesses. """ import os import multiprocessing import signal import subprocess import sys import time from binascii import hexlify from tornado.concurrent import ( Future, future_set_result_unless_cancelled, future_set_exception_unless_cancelled, ) from tornado import ioloop from tornado.iostream import PipeIOStream from tornado.log import gen_log import typing from typing import Optional, Any, Callable if typing.TYPE_CHECKING: from typing import List # noqa: F401 # Re-export this exception for convenience. CalledProcessError = subprocess.CalledProcessError def cpu_count() -> int: """Returns the number of processors on this machine.""" if multiprocessing is None: return 1 try: return multiprocessing.cpu_count() except NotImplementedError: pass try: return os.sysconf("SC_NPROCESSORS_CONF") # type: ignore except (AttributeError, ValueError): pass gen_log.error("Could not detect number of processors; assuming 1") return 1 def _reseed_random() -> None: if "random" not in sys.modules: return import random # If os.urandom is available, this method does the same thing as # random.seed (at least as of python 2.6). If os.urandom is not # available, we mix in the pid in addition to a timestamp. try: seed = int(hexlify(os.urandom(16)), 16) except NotImplementedError: seed = int(time.time() * 1000) ^ os.getpid() random.seed(seed) _task_id = None def fork_processes( num_processes: Optional[int], max_restarts: Optional[int] = None ) -> int: """Starts multiple worker processes. If ``num_processes`` is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If ``num_processes`` is given and > 0, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``fork_processes``. In each child process, ``fork_processes`` returns its *task id*, a number between 0 and ``num_processes``. Processes that exit abnormally (due to a signal or non-zero exit status) are restarted with the same id (up to ``max_restarts`` times). In the parent process, ``fork_processes`` calls ``sys.exit(0)`` after all child processes have exited normally. max_restarts defaults to 100. Availability: Unix """ if sys.platform == "win32": # The exact form of this condition matters to mypy; it understands # if but not assert in this context. 
raise Exception("fork not available on windows") if max_restarts is None: max_restarts = 100 global _task_id assert _task_id is None if num_processes is None or num_processes <= 0: num_processes = cpu_count() gen_log.info("Starting %d processes", num_processes) children = {} def start_child(i: int) -> Optional[int]: pid = os.fork() if pid == 0: # child process _reseed_random() global _task_id _task_id = i return i else: children[pid] = i return None for i in range(num_processes): id = start_child(i) if id is not None: return id num_restarts = 0 while children: pid, status = os.wait() if pid not in children: continue id = children.pop(pid) if os.WIFSIGNALED(status): gen_log.warning( "child %d (pid %d) killed by signal %d, restarting", id, pid, os.WTERMSIG(status), ) elif os.WEXITSTATUS(status) != 0: gen_log.warning( "child %d (pid %d) exited with status %d, restarting", id, pid, os.WEXITSTATUS(status), ) else: gen_log.info("child %d (pid %d) exited normally", id, pid) continue num_restarts += 1 if num_restarts > max_restarts: raise RuntimeError("Too many child restarts, giving up") new_id = start_child(id) if new_id is not None: return new_id # All child processes exited cleanly, so exit the master process # instead of just returning to right after the call to # fork_processes (which will probably just start up another IOLoop # unless the caller checks the return value). sys.exit(0) def task_id() -> Optional[int]: """Returns the current task id, if any. Returns None if this process was not created by `fork_processes`. """ global _task_id return _task_id class Subprocess(object): """Wraps ``subprocess.Popen`` with IOStream support. The constructor is the same as ``subprocess.Popen`` with the following additions: * ``stdin``, ``stdout``, and ``stderr`` may have the value ``tornado.process.Subprocess.STREAM``, which will make the corresponding attribute of the resulting Subprocess a `.PipeIOStream`. If this option is used, the caller is responsible for closing the streams when done with them. The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and ``wait_for_exit`` methods do not work on Windows. There is therefore no reason to use this class instead of ``subprocess.Popen`` on that platform. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ STREAM = object() _initialized = False _waiting = {} # type: ignore _old_sigchld = None def __init__(self, *args: Any, **kwargs: Any) -> None: self.io_loop = ioloop.IOLoop.current() # All FDs we create should be closed on error; those in to_close # should be closed in the parent process on success. 
pipe_fds = [] # type: List[int] to_close = [] # type: List[int] if kwargs.get("stdin") is Subprocess.STREAM: in_r, in_w = os.pipe() kwargs["stdin"] = in_r pipe_fds.extend((in_r, in_w)) to_close.append(in_r) self.stdin = PipeIOStream(in_w) if kwargs.get("stdout") is Subprocess.STREAM: out_r, out_w = os.pipe() kwargs["stdout"] = out_w pipe_fds.extend((out_r, out_w)) to_close.append(out_w) self.stdout = PipeIOStream(out_r) if kwargs.get("stderr") is Subprocess.STREAM: err_r, err_w = os.pipe() kwargs["stderr"] = err_w pipe_fds.extend((err_r, err_w)) to_close.append(err_w) self.stderr = PipeIOStream(err_r) try: self.proc = subprocess.Popen(*args, **kwargs) except: for fd in pipe_fds: os.close(fd) raise for fd in to_close: os.close(fd) self.pid = self.proc.pid for attr in ["stdin", "stdout", "stderr"]: if not hasattr(self, attr): # don't clobber streams set above setattr(self, attr, getattr(self.proc, attr)) self._exit_callback = None # type: Optional[Callable[[int], None]] self.returncode = None # type: Optional[int] def set_exit_callback(self, callback: Callable[[int], None]) -> None: """Runs ``callback`` when this process exits. The callback takes one argument, the return code of the process. This method uses a ``SIGCHLD`` handler, which is a global setting and may conflict if you have other libraries trying to handle the same signal. If you are using more than one ``IOLoop`` it may be necessary to call `Subprocess.initialize` first to designate one ``IOLoop`` to run the signal handlers. In many cases a close callback on the stdout or stderr streams can be used as an alternative to an exit callback if the signal handler is causing a problem. Availability: Unix """ self._exit_callback = callback Subprocess.initialize() Subprocess._waiting[self.pid] = self Subprocess._try_cleanup_process(self.pid) def wait_for_exit(self, raise_error: bool = True) -> "Future[int]": """Returns a `.Future` which resolves when the process exits. Usage:: ret = yield proc.wait_for_exit() This is a coroutine-friendly alternative to `set_exit_callback` (and a replacement for the blocking `subprocess.Popen.wait`). By default, raises `subprocess.CalledProcessError` if the process has a non-zero exit status. Use ``wait_for_exit(raise_error=False)`` to suppress this behavior and return the exit status without raising. .. versionadded:: 4.2 Availability: Unix """ future = Future() # type: Future[int] def callback(ret: int) -> None: if ret != 0 and raise_error: # Unfortunately we don't have the original args any more. future_set_exception_unless_cancelled( future, CalledProcessError(ret, "unknown") ) else: future_set_result_unless_cancelled(future, ret) self.set_exit_callback(callback) return future @classmethod def initialize(cls) -> None: """Initializes the ``SIGCHLD`` handler. The signal handler is run on an `.IOLoop` to avoid locking issues. Note that the `.IOLoop` used for signal handling need not be the same one used by individual Subprocess objects (as long as the ``IOLoops`` are each running in separate threads). .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. 
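        A sketch of explicit initialization (normally this happens
        implicitly via `set_exit_callback`)::

            from tornado.process import Subprocess

            # Designates IOLoop.current() as the loop that will run
            # the SIGCHLD handler.
            Subprocess.initialize()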
Availability: Unix """ if cls._initialized: return io_loop = ioloop.IOLoop.current() cls._old_sigchld = signal.signal( signal.SIGCHLD, lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup), ) cls._initialized = True @classmethod def uninitialize(cls) -> None: """Removes the ``SIGCHLD`` handler.""" if not cls._initialized: return signal.signal(signal.SIGCHLD, cls._old_sigchld) cls._initialized = False @classmethod def _cleanup(cls) -> None: for pid in list(cls._waiting.keys()): # make a copy cls._try_cleanup_process(pid) @classmethod def _try_cleanup_process(cls, pid: int) -> None: try: ret_pid, status = os.waitpid(pid, os.WNOHANG) # type: ignore except ChildProcessError: return if ret_pid == 0: return assert ret_pid == pid subproc = cls._waiting.pop(pid) subproc.io_loop.add_callback_from_signal(subproc._set_returncode, status) def _set_returncode(self, status: int) -> None: if sys.platform == "win32": self.returncode = -1 else: if os.WIFSIGNALED(status): self.returncode = -os.WTERMSIG(status) else: assert os.WIFEXITED(status) self.returncode = os.WEXITSTATUS(status) # We've taken over wait() duty from the subprocess.Popen # object. If we don't inform it of the process's return code, # it will log a warning at destruction in python 3.6+. self.proc.returncode = self.returncode if self._exit_callback: callback = self._exit_callback self._exit_callback = None callback(self.returncode) tornado-6.1.0/tornado/py.typed000066400000000000000000000000001374705040500163240ustar00rootroot00000000000000tornado-6.1.0/tornado/queues.py000066400000000000000000000300011374705040500165120ustar00rootroot00000000000000# Copyright 2015 The Tornado Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Asynchronous queues for coroutines. These classes are very similar to those provided in the standard library's `asyncio package `_. .. warning:: Unlike the standard library's `queue` module, the classes defined here are *not* thread-safe. To use these queues from another thread, use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread before calling any queue methods. 
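For example, a producer running on another thread must hand items
over to the queue's `.IOLoop` thread instead of touching the queue
directly (a sketch; ``loop`` and ``q`` are assumed to already exist):

.. code-block:: python

    import threading

    def producer_thread(loop, q):
        for i in range(5):
            # Schedule the put on the IOLoop thread; calling
            # q.put_nowait(i) directly here would not be safe.
            loop.add_callback(q.put_nowait, i)

    threading.Thread(target=producer_thread, args=(loop, q)).start()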
""" import collections import datetime import heapq from tornado import gen, ioloop from tornado.concurrent import Future, future_set_result_unless_cancelled from tornado.locks import Event from typing import Union, TypeVar, Generic, Awaitable, Optional import typing if typing.TYPE_CHECKING: from typing import Deque, Tuple, Any # noqa: F401 _T = TypeVar("_T") __all__ = ["Queue", "PriorityQueue", "LifoQueue", "QueueFull", "QueueEmpty"] class QueueEmpty(Exception): """Raised by `.Queue.get_nowait` when the queue has no items.""" pass class QueueFull(Exception): """Raised by `.Queue.put_nowait` when a queue is at its maximum size.""" pass def _set_timeout( future: Future, timeout: Union[None, float, datetime.timedelta] ) -> None: if timeout: def on_timeout() -> None: if not future.done(): future.set_exception(gen.TimeoutError()) io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) future.add_done_callback(lambda _: io_loop.remove_timeout(timeout_handle)) class _QueueIterator(Generic[_T]): def __init__(self, q: "Queue[_T]") -> None: self.q = q def __anext__(self) -> Awaitable[_T]: return self.q.get() class Queue(Generic[_T]): """Coordinate producer and consumer coroutines. If maxsize is 0 (the default) the queue size is unbounded. .. testcode:: from tornado import gen from tornado.ioloop import IOLoop from tornado.queues import Queue q = Queue(maxsize=2) async def consumer(): async for item in q: try: print('Doing work on %s' % item) await gen.sleep(0.01) finally: q.task_done() async def producer(): for item in range(5): await q.put(item) print('Put %s' % item) async def main(): # Start consumer without waiting (since it never finishes). IOLoop.current().spawn_callback(consumer) await producer() # Wait for producer to put all tasks. await q.join() # Wait for consumer to finish all tasks. print('Done') IOLoop.current().run_sync(main) .. testoutput:: Put 0 Put 1 Doing work on 0 Put 2 Doing work on 1 Put 3 Doing work on 2 Put 4 Doing work on 3 Doing work on 4 Done In versions of Python without native coroutines (before 3.5), ``consumer()`` could be written as:: @gen.coroutine def consumer(): while True: item = yield q.get() try: print('Doing work on %s' % item) yield gen.sleep(0.01) finally: q.task_done() .. versionchanged:: 4.3 Added ``async for`` support in Python 3.5. """ # Exact type depends on subclass. Could be another generic # parameter and use protocols to be more precise here. _queue = None # type: Any def __init__(self, maxsize: int = 0) -> None: if maxsize is None: raise TypeError("maxsize can't be None") if maxsize < 0: raise ValueError("maxsize can't be negative") self._maxsize = maxsize self._init() self._getters = collections.deque([]) # type: Deque[Future[_T]] self._putters = collections.deque([]) # type: Deque[Tuple[_T, Future[None]]] self._unfinished_tasks = 0 self._finished = Event() self._finished.set() @property def maxsize(self) -> int: """Number of items allowed in the queue.""" return self._maxsize def qsize(self) -> int: """Number of items in the queue.""" return len(self._queue) def empty(self) -> bool: return not self._queue def full(self) -> bool: if self.maxsize == 0: return False else: return self.qsize() >= self.maxsize def put( self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None ) -> "Future[None]": """Put an item into the queue, perhaps waiting until there is room. Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. 
``timeout`` may be a number denoting a time (on the same scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. """ future = Future() # type: Future[None] try: self.put_nowait(item) except QueueFull: self._putters.append((item, future)) _set_timeout(future, timeout) else: future.set_result(None) return future def put_nowait(self, item: _T) -> None: """Put an item into the queue without blocking. If no free slot is immediately available, raise `QueueFull`. """ self._consume_expired() if self._getters: assert self.empty(), "queue non-empty, why are getters waiting?" getter = self._getters.popleft() self.__put_internal(item) future_set_result_unless_cancelled(getter, self._get()) elif self.full(): raise QueueFull else: self.__put_internal(item) def get( self, timeout: Optional[Union[float, datetime.timedelta]] = None ) -> Awaitable[_T]: """Remove and return an item from the queue. Returns an awaitable which resolves once an item is available, or raises `tornado.util.TimeoutError` after a timeout. ``timeout`` may be a number denoting a time (on the same scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. .. note:: The ``timeout`` argument of this method differs from that of the standard library's `queue.Queue.get`. That method interprets numeric values as relative timeouts; this one interprets them as absolute deadlines and requires ``timedelta`` objects for relative timeouts (consistent with other timeouts in Tornado). """ future = Future() # type: Future[_T] try: future.set_result(self.get_nowait()) except QueueEmpty: self._getters.append(future) _set_timeout(future, timeout) return future def get_nowait(self) -> _T: """Remove and return an item from the queue without blocking. Return an item if one is immediately available, else raise `QueueEmpty`. """ self._consume_expired() if self._putters: assert self.full(), "queue not full, why are putters waiting?" item, putter = self._putters.popleft() self.__put_internal(item) future_set_result_unless_cancelled(putter, None) return self._get() elif self.qsize(): return self._get() else: raise QueueEmpty def task_done(self) -> None: """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each `.get` used to fetch a task, a subsequent call to `.task_done` tells the queue that the processing on the task is complete. If a `.join` is blocking, it resumes when all items have been processed; that is, when every `.put` is matched by a `.task_done`. Raises `ValueError` if called more times than `.put`. """ if self._unfinished_tasks <= 0: raise ValueError("task_done() called too many times") self._unfinished_tasks -= 1 if self._unfinished_tasks == 0: self._finished.set() def join( self, timeout: Optional[Union[float, datetime.timedelta]] = None ) -> Awaitable[None]: """Block until all items in the queue are processed. Returns an awaitable, which raises `tornado.util.TimeoutError` after a timeout. """ return self._finished.wait(timeout) def __aiter__(self) -> _QueueIterator[_T]: return _QueueIterator(self) # These three are overridable in subclasses. def _init(self) -> None: self._queue = collections.deque() def _get(self) -> _T: return self._queue.popleft() def _put(self, item: _T) -> None: self._queue.append(item) # End of the overridable methods. 
def __put_internal(self, item: _T) -> None: self._unfinished_tasks += 1 self._finished.clear() self._put(item) def _consume_expired(self) -> None: # Remove timed-out waiters. while self._putters and self._putters[0][1].done(): self._putters.popleft() while self._getters and self._getters[0].done(): self._getters.popleft() def __repr__(self) -> str: return "<%s at %s %s>" % (type(self).__name__, hex(id(self)), self._format()) def __str__(self) -> str: return "<%s %s>" % (type(self).__name__, self._format()) def _format(self) -> str: result = "maxsize=%r" % (self.maxsize,) if getattr(self, "_queue", None): result += " queue=%r" % self._queue if self._getters: result += " getters[%s]" % len(self._getters) if self._putters: result += " putters[%s]" % len(self._putters) if self._unfinished_tasks: result += " tasks=%s" % self._unfinished_tasks return result class PriorityQueue(Queue): """A `.Queue` that retrieves entries in priority order, lowest first. Entries are typically tuples like ``(priority number, data)``. .. testcode:: from tornado.queues import PriorityQueue q = PriorityQueue() q.put((1, 'medium-priority item')) q.put((0, 'high-priority item')) q.put((10, 'low-priority item')) print(q.get_nowait()) print(q.get_nowait()) print(q.get_nowait()) .. testoutput:: (0, 'high-priority item') (1, 'medium-priority item') (10, 'low-priority item') """ def _init(self) -> None: self._queue = [] def _put(self, item: _T) -> None: heapq.heappush(self._queue, item) def _get(self) -> _T: return heapq.heappop(self._queue) class LifoQueue(Queue): """A `.Queue` that retrieves the most recently put items first. .. testcode:: from tornado.queues import LifoQueue q = LifoQueue() q.put(3) q.put(2) q.put(1) print(q.get_nowait()) print(q.get_nowait()) print(q.get_nowait()) .. testoutput:: 1 2 3 """ def _init(self) -> None: self._queue = [] def _put(self, item: _T) -> None: self._queue.append(item) def _get(self) -> _T: return self._queue.pop() tornado-6.1.0/tornado/routing.py000066400000000000000000000607721374705040500167140ustar00rootroot00000000000000# Copyright 2015 The Tornado Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Flexible routing implementation. Tornado routes HTTP requests to appropriate handlers using `Router` class implementations. The `tornado.web.Application` class is a `Router` implementation and may be used directly, or the classes in this module may be used for additional flexibility. The `RuleRouter` class can match on more criteria than `.Application`, or the `Router` interface can be subclassed for maximum customization. `Router` interface extends `~.httputil.HTTPServerConnectionDelegate` to provide additional routing capabilities. This also means that any `Router` implementation can be used directly as a ``request_callback`` for `~.httpserver.HTTPServer` constructor. `Router` subclass must implement a ``find_handler`` method to provide a suitable `~.httputil.HTTPMessageDelegate` instance to handle the request: .. 
code-block:: python class CustomRouter(Router): def find_handler(self, request, **kwargs): # some routing logic providing a suitable HTTPMessageDelegate instance return MessageDelegate(request.connection) class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": "2"}), b"OK") self.connection.finish() router = CustomRouter() server = HTTPServer(router) The main responsibility of `Router` implementation is to provide a mapping from a request to `~.httputil.HTTPMessageDelegate` instance that will handle this request. In the example above we can see that routing is possible even without instantiating an `~.web.Application`. For routing to `~.web.RequestHandler` implementations we need an `~.web.Application` instance. `~.web.Application.get_handler_delegate` provides a convenient way to create `~.httputil.HTTPMessageDelegate` for a given request and `~.web.RequestHandler`. Here is a simple example of how we can we route to `~.web.RequestHandler` subclasses by HTTP method: .. code-block:: python resources = {} class GetResource(RequestHandler): def get(self, path): if path not in resources: raise HTTPError(404) self.finish(resources[path]) class PostResource(RequestHandler): def post(self, path): resources[path] = self.request.body class HTTPMethodRouter(Router): def __init__(self, app): self.app = app def find_handler(self, request, **kwargs): handler = GetResource if request.method == "GET" else PostResource return self.app.get_handler_delegate(request, handler, path_args=[request.path]) router = HTTPMethodRouter(Application()) server = HTTPServer(router) `ReversibleRouter` interface adds the ability to distinguish between the routes and reverse them to the original urls using route's name and additional arguments. `~.web.Application` is itself an implementation of `ReversibleRouter` class. `RuleRouter` and `ReversibleRuleRouter` are implementations of `Router` and `ReversibleRouter` interfaces and can be used for creating rule-based routing configurations. Rules are instances of `Rule` class. They contain a `Matcher`, which provides the logic for determining whether the rule is a match for a particular request and a target, which can be one of the following. 1) An instance of `~.httputil.HTTPServerConnectionDelegate`: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/handler"), ConnectionDelegate()), # ... more rules ]) class ConnectionDelegate(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): return MessageDelegate(request_conn) 2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/callable"), request_callable) ]) def request_callable(request): request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK") request.finish() 3) Another `Router` instance: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/router.*"), CustomRouter()) ]) Of course a nested `RuleRouter` or a `~.web.Application` is allowed: .. code-block:: python router = RuleRouter([ Rule(HostMatches("example.com"), RuleRouter([ Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])), ])) ]) server = HTTPServer(router) In the example below `RuleRouter` is used to route between applications: .. code-block:: python app1 = Application([ (r"/app1/handler", Handler1), # other handlers ... 
]) app2 = Application([ (r"/app2/handler", Handler2), # other handlers ... ]) router = RuleRouter([ Rule(PathMatches("/app1.*"), app1), Rule(PathMatches("/app2.*"), app2) ]) server = HTTPServer(router) For more information on application-level routing see docs for `~.web.Application`. .. versionadded:: 4.5 """ import re from functools import partial from tornado import httputil from tornado.httpserver import _CallableAdapter from tornado.escape import url_escape, url_unescape, utf8 from tornado.log import app_log from tornado.util import basestring_type, import_object, re_unescape, unicode_type from typing import Any, Union, Optional, Awaitable, List, Dict, Pattern, Tuple, overload class Router(httputil.HTTPServerConnectionDelegate): """Abstract router interface.""" def find_handler( self, request: httputil.HTTPServerRequest, **kwargs: Any ) -> Optional[httputil.HTTPMessageDelegate]: """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate` that can serve the request. Routing implementations may pass additional kwargs to extend the routing logic. :arg httputil.HTTPServerRequest request: current HTTP request. :arg kwargs: additional keyword arguments passed by routing implementation. :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to process the request. """ raise NotImplementedError() def start_request( self, server_conn: object, request_conn: httputil.HTTPConnection ) -> httputil.HTTPMessageDelegate: return _RoutingDelegate(self, server_conn, request_conn) class ReversibleRouter(Router): """Abstract router interface for routers that can handle named routes and support reversing them to original urls. """ def reverse_url(self, name: str, *args: Any) -> Optional[str]: """Returns url string for a given route name and arguments or ``None`` if no match is found. :arg str name: route name. :arg args: url parameters. :returns: parametrized url string for a given route name (or ``None``). 
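        For example, given a rule registered under the name
        ``"user_page"`` with matcher ``PathMatches(r"/users/([0-9]+)")``
        (names here are illustrative)::

            router.reverse_url("user_page", 42)  # "/users/42"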
""" raise NotImplementedError() class _RoutingDelegate(httputil.HTTPMessageDelegate): def __init__( self, router: Router, server_conn: object, request_conn: httputil.HTTPConnection ) -> None: self.server_conn = server_conn self.request_conn = request_conn self.delegate = None # type: Optional[httputil.HTTPMessageDelegate] self.router = router # type: Router def headers_received( self, start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine], headers: httputil.HTTPHeaders, ) -> Optional[Awaitable[None]]: assert isinstance(start_line, httputil.RequestStartLine) request = httputil.HTTPServerRequest( connection=self.request_conn, server_connection=self.server_conn, start_line=start_line, headers=headers, ) self.delegate = self.router.find_handler(request) if self.delegate is None: app_log.debug( "Delegate for %s %s request not found", start_line.method, start_line.path, ) self.delegate = _DefaultMessageDelegate(self.request_conn) return self.delegate.headers_received(start_line, headers) def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]: assert self.delegate is not None return self.delegate.data_received(chunk) def finish(self) -> None: assert self.delegate is not None self.delegate.finish() def on_connection_close(self) -> None: assert self.delegate is not None self.delegate.on_connection_close() class _DefaultMessageDelegate(httputil.HTTPMessageDelegate): def __init__(self, connection: httputil.HTTPConnection) -> None: self.connection = connection def finish(self) -> None: self.connection.write_headers( httputil.ResponseStartLine("HTTP/1.1", 404, "Not Found"), httputil.HTTPHeaders(), ) self.connection.finish() # _RuleList can either contain pre-constructed Rules or a sequence of # arguments to be passed to the Rule constructor. _RuleList = List[ Union[ "Rule", List[Any], # Can't do detailed typechecking of lists. Tuple[Union[str, "Matcher"], Any], Tuple[Union[str, "Matcher"], Any, Dict[str, Any]], Tuple[Union[str, "Matcher"], Any, Dict[str, Any], str], ] ] class RuleRouter(Router): """Rule-based router implementation.""" def __init__(self, rules: Optional[_RuleList] = None) -> None: """Constructs a router from an ordered list of rules:: RuleRouter([ Rule(PathMatches("/handler"), Target), # ... more rules ]) You can also omit explicit `Rule` constructor and use tuples of arguments:: RuleRouter([ (PathMatches("/handler"), Target), ]) `PathMatches` is a default matcher, so the example above can be simplified:: RuleRouter([ ("/handler", Target), ]) In the examples above, ``Target`` can be a nested `Router` instance, an instance of `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument. :arg rules: a list of `Rule` instances or tuples of `Rule` constructor arguments. """ self.rules = [] # type: List[Rule] if rules: self.add_rules(rules) def add_rules(self, rules: _RuleList) -> None: """Appends new rules to the router. :arg rules: a list of Rule instances (or tuples of arguments, which are passed to Rule constructor). """ for rule in rules: if isinstance(rule, (tuple, list)): assert len(rule) in (2, 3, 4) if isinstance(rule[0], basestring_type): rule = Rule(PathMatches(rule[0]), *rule[1:]) else: rule = Rule(*rule) self.rules.append(self.process_rule(rule)) def process_rule(self, rule: "Rule") -> "Rule": """Override this method for additional preprocessing of each rule. :arg Rule rule: a rule to be processed. :returns: the same or modified Rule instance. 
""" return rule def find_handler( self, request: httputil.HTTPServerRequest, **kwargs: Any ) -> Optional[httputil.HTTPMessageDelegate]: for rule in self.rules: target_params = rule.matcher.match(request) if target_params is not None: if rule.target_kwargs: target_params["target_kwargs"] = rule.target_kwargs delegate = self.get_target_delegate( rule.target, request, **target_params ) if delegate is not None: return delegate return None def get_target_delegate( self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any ) -> Optional[httputil.HTTPMessageDelegate]: """Returns an instance of `~.httputil.HTTPMessageDelegate` for a Rule's target. This method is called by `~.find_handler` and can be extended to provide additional target types. :arg target: a Rule's target. :arg httputil.HTTPServerRequest request: current request. :arg target_params: additional parameters that can be useful for `~.httputil.HTTPMessageDelegate` creation. """ if isinstance(target, Router): return target.find_handler(request, **target_params) elif isinstance(target, httputil.HTTPServerConnectionDelegate): assert request.connection is not None return target.start_request(request.server_connection, request.connection) elif callable(target): assert request.connection is not None return _CallableAdapter( partial(target, **target_params), request.connection ) return None class ReversibleRuleRouter(ReversibleRouter, RuleRouter): """A rule-based router that implements ``reverse_url`` method. Each rule added to this router may have a ``name`` attribute that can be used to reconstruct an original uri. The actual reconstruction takes place in a rule's matcher (see `Matcher.reverse`). """ def __init__(self, rules: Optional[_RuleList] = None) -> None: self.named_rules = {} # type: Dict[str, Any] super().__init__(rules) def process_rule(self, rule: "Rule") -> "Rule": rule = super().process_rule(rule) if rule.name: if rule.name in self.named_rules: app_log.warning( "Multiple handlers named %s; replacing previous value", rule.name ) self.named_rules[rule.name] = rule return rule def reverse_url(self, name: str, *args: Any) -> Optional[str]: if name in self.named_rules: return self.named_rules[name].matcher.reverse(*args) for rule in self.rules: if isinstance(rule.target, ReversibleRouter): reversed_url = rule.target.reverse_url(name, *args) if reversed_url is not None: return reversed_url return None class Rule(object): """A routing rule.""" def __init__( self, matcher: "Matcher", target: Any, target_kwargs: Optional[Dict[str, Any]] = None, name: Optional[str] = None, ) -> None: """Constructs a Rule instance. :arg Matcher matcher: a `Matcher` instance used for determining whether the rule should be considered a match for a specific request. :arg target: a Rule's target (typically a ``RequestHandler`` or `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`, depending on routing implementation). :arg dict target_kwargs: a dict of parameters that can be useful at the moment of target instantiation (for example, ``status_code`` for a ``RequestHandler`` subclass). They end up in ``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate` method. :arg str name: the name of the rule that can be used to find it in `ReversibleRouter.reverse_url` implementation. 
""" if isinstance(target, str): # import the Module and instantiate the class # Must be a fully qualified name (module.ClassName) target = import_object(target) self.matcher = matcher # type: Matcher self.target = target self.target_kwargs = target_kwargs if target_kwargs else {} self.name = name def reverse(self, *args: Any) -> Optional[str]: return self.matcher.reverse(*args) def __repr__(self) -> str: return "%s(%r, %s, kwargs=%r, name=%r)" % ( self.__class__.__name__, self.matcher, self.target, self.target_kwargs, self.name, ) class Matcher(object): """Represents a matcher for request features.""" def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: """Matches current instance against the request. :arg httputil.HTTPServerRequest request: current HTTP request :returns: a dict of parameters to be passed to the target handler (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs`` can be passed for proper `~.web.RequestHandler` instantiation). An empty dict is a valid (and common) return value to indicate a match when the argument-passing features are not used. ``None`` must be returned to indicate that there is no match.""" raise NotImplementedError() def reverse(self, *args: Any) -> Optional[str]: """Reconstructs full url from matcher instance and additional arguments.""" return None class AnyMatches(Matcher): """Matches any request.""" def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: return {} class HostMatches(Matcher): """Matches requests from hosts specified by ``host_pattern`` regex.""" def __init__(self, host_pattern: Union[str, Pattern]) -> None: if isinstance(host_pattern, basestring_type): if not host_pattern.endswith("$"): host_pattern += "$" self.host_pattern = re.compile(host_pattern) else: self.host_pattern = host_pattern def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: if self.host_pattern.match(request.host_name): return {} return None class DefaultHostMatches(Matcher): """Matches requests from host that is equal to application's default_host. Always returns no match if ``X-Real-Ip`` header is present. """ def __init__(self, application: Any, host_pattern: Pattern) -> None: self.application = application self.host_pattern = host_pattern def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: # Look for default host if not behind load balancer (for debugging) if "X-Real-Ip" not in request.headers: if self.host_pattern.match(self.application.default_host): return {} return None class PathMatches(Matcher): """Matches requests with paths specified by ``path_pattern`` regex.""" def __init__(self, path_pattern: Union[str, Pattern]) -> None: if isinstance(path_pattern, basestring_type): if not path_pattern.endswith("$"): path_pattern += "$" self.regex = re.compile(path_pattern) else: self.regex = path_pattern assert len(self.regex.groupindex) in (0, self.regex.groups), ( "groups in url regexes must either be all named or all " "positional: %r" % self.regex.pattern ) self._path, self._group_count = self._find_groups() def match(self, request: httputil.HTTPServerRequest) -> Optional[Dict[str, Any]]: match = self.regex.match(request.path) if match is None: return None if not self.regex.groups: return {} path_args = [] # type: List[bytes] path_kwargs = {} # type: Dict[str, bytes] # Pass matched groups to the handler. Since # match.groups() includes both named and # unnamed groups, we want to use either groups # or groupdict but not both. 
if self.regex.groupindex: path_kwargs = dict( (str(k), _unquote_or_none(v)) for (k, v) in match.groupdict().items() ) else: path_args = [_unquote_or_none(s) for s in match.groups()] return dict(path_args=path_args, path_kwargs=path_kwargs) def reverse(self, *args: Any) -> Optional[str]: if self._path is None: raise ValueError("Cannot reverse url regex " + self.regex.pattern) assert len(args) == self._group_count, ( "required number of arguments " "not found" ) if not len(args): return self._path converted_args = [] for a in args: if not isinstance(a, (unicode_type, bytes)): a = str(a) converted_args.append(url_escape(utf8(a), plus=False)) return self._path % tuple(converted_args) def _find_groups(self) -> Tuple[Optional[str], Optional[int]]: """Returns a tuple (reverse string, group count) for a url. For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method would return ('/%s/%s/', 2). """ pattern = self.regex.pattern if pattern.startswith("^"): pattern = pattern[1:] if pattern.endswith("$"): pattern = pattern[:-1] if self.regex.groups != pattern.count("("): # The pattern is too complicated for our simplistic matching, # so we can't support reversing it. return None, None pieces = [] for fragment in pattern.split("("): if ")" in fragment: paren_loc = fragment.index(")") if paren_loc >= 0: try: unescaped_fragment = re_unescape(fragment[paren_loc + 1 :]) except ValueError: # If we can't unescape part of it, we can't # reverse this url. return (None, None) pieces.append("%s" + unescaped_fragment) else: try: unescaped_fragment = re_unescape(fragment) except ValueError: # If we can't unescape part of it, we can't # reverse this url. return (None, None) pieces.append(unescaped_fragment) return "".join(pieces), self.regex.groups class URLSpec(Rule): """Specifies mappings between URLs and handlers. .. versionchanged: 4.5 `URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for backwards compatibility. """ def __init__( self, pattern: Union[str, Pattern], handler: Any, kwargs: Optional[Dict[str, Any]] = None, name: Optional[str] = None, ) -> None: """Parameters: * ``pattern``: Regular expression to be matched. Any capturing groups in the regex will be passed in to the handler's get/post/etc methods as arguments (by keyword if named, by position if unnamed. Named and unnamed capturing groups may not be mixed in the same rule). * ``handler``: `~.web.RequestHandler` subclass to be invoked. * ``kwargs`` (optional): A dictionary of additional arguments to be passed to the handler's constructor. * ``name`` (optional): A name for this handler. Used by `~.web.Application.reverse_url`. """ matcher = PathMatches(pattern) super().__init__(matcher, handler, kwargs, name) self.regex = matcher.regex self.handler_class = self.target self.kwargs = kwargs def __repr__(self) -> str: return "%s(%r, %s, kwargs=%r, name=%r)" % ( self.__class__.__name__, self.regex.pattern, self.handler_class, self.kwargs, self.name, ) @overload def _unquote_or_none(s: str) -> bytes: pass @overload # noqa: F811 def _unquote_or_none(s: None) -> None: pass def _unquote_or_none(s: Optional[str]) -> Optional[bytes]: # noqa: F811 """None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use. 
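    For example::

        _unquote_or_none("a%2Fb")  # returns b"a/b"
        _unquote_or_none(None)     # returns None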
""" if s is None: return s return url_unescape(s, encoding=None, plus=False) tornado-6.1.0/tornado/simple_httpclient.py000066400000000000000000000656721374705040500207600ustar00rootroot00000000000000from tornado.escape import _unicode from tornado import gen, version from tornado.httpclient import ( HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy, HTTPRequest, ) from tornado import httputil from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters from tornado.ioloop import IOLoop from tornado.iostream import StreamClosedError, IOStream from tornado.netutil import ( Resolver, OverrideResolver, _client_ssl_defaults, is_valid_ip, ) from tornado.log import gen_log from tornado.tcpclient import TCPClient import base64 import collections import copy import functools import re import socket import ssl import sys import time from io import BytesIO import urllib.parse from typing import Dict, Any, Callable, Optional, Type, Union from types import TracebackType import typing if typing.TYPE_CHECKING: from typing import Deque, Tuple, List # noqa: F401 class HTTPTimeoutError(HTTPError): """Error raised by SimpleAsyncHTTPClient on timeout. For historical reasons, this is a subclass of `.HTTPClientError` which simulates a response code of 599. .. versionadded:: 5.1 """ def __init__(self, message: str) -> None: super().__init__(599, message=message) def __str__(self) -> str: return self.message or "Timeout" class HTTPStreamClosedError(HTTPError): """Error raised by SimpleAsyncHTTPClient when the underlying stream is closed. When a more specific exception is available (such as `ConnectionResetError`), it may be raised instead of this one. For historical reasons, this is a subclass of `.HTTPClientError` which simulates a response code of 599. .. versionadded:: 5.1 """ def __init__(self, message: str) -> None: super().__init__(599, message=message) def __str__(self) -> str: return self.message or "Stream closed" class SimpleAsyncHTTPClient(AsyncHTTPClient): """Non-blocking HTTP client with no external dependencies. This class implements an HTTP 1.1 client on top of Tornado's IOStreams. Some features found in the curl-based AsyncHTTPClient are not yet supported. In particular, proxies are not supported, connections are not reused, and callers cannot select the network interface to be used. """ def initialize( # type: ignore self, max_clients: int = 10, hostname_mapping: Optional[Dict[str, str]] = None, max_buffer_size: int = 104857600, resolver: Optional[Resolver] = None, defaults: Optional[Dict[str, Any]] = None, max_header_size: Optional[int] = None, max_body_size: Optional[int] = None, ) -> None: """Creates a AsyncHTTPClient. Only a single AsyncHTTPClient instance exists per IOLoop in order to provide limitations on the number of pending connections. ``force_instance=True`` may be used to suppress this behavior. Note that because of this implicit reuse, unless ``force_instance`` is used, only the first call to the constructor actually uses its arguments. It is recommended to use the ``configure`` method instead of the constructor to ensure that arguments take effect. ``max_clients`` is the number of concurrent requests that can be in progress; when this limit is reached additional requests will be queued. Note that time spent waiting in this queue still counts against the ``request_timeout``. ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses. 
It can be used to make local DNS changes when modifying system-wide settings like ``/etc/hosts`` is not possible or desirable (e.g. in unittests). ``max_buffer_size`` (default 100MB) is the number of bytes that can be read into memory at once. ``max_body_size`` (defaults to ``max_buffer_size``) is the largest response body that the client will accept. Without a ``streaming_callback``, the smaller of these two limits applies; with a ``streaming_callback`` only ``max_body_size`` does. .. versionchanged:: 4.2 Added the ``max_body_size`` argument. """ super().initialize(defaults=defaults) self.max_clients = max_clients self.queue = ( collections.deque() ) # type: Deque[Tuple[object, HTTPRequest, Callable[[HTTPResponse], None]]] self.active = ( {} ) # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None]]] self.waiting = ( {} ) # type: Dict[object, Tuple[HTTPRequest, Callable[[HTTPResponse], None], object]] self.max_buffer_size = max_buffer_size self.max_header_size = max_header_size self.max_body_size = max_body_size # TCPClient could create a Resolver for us, but we have to do it # ourselves to support hostname_mapping. if resolver: self.resolver = resolver self.own_resolver = False else: self.resolver = Resolver() self.own_resolver = True if hostname_mapping is not None: self.resolver = OverrideResolver( resolver=self.resolver, mapping=hostname_mapping ) self.tcp_client = TCPClient(resolver=self.resolver) def close(self) -> None: super().close() if self.own_resolver: self.resolver.close() self.tcp_client.close() def fetch_impl( self, request: HTTPRequest, callback: Callable[[HTTPResponse], None] ) -> None: key = object() self.queue.append((key, request, callback)) assert request.connect_timeout is not None assert request.request_timeout is not None timeout_handle = None if len(self.active) >= self.max_clients: timeout = ( min(request.connect_timeout, request.request_timeout) or request.connect_timeout or request.request_timeout ) # min but skip zero if timeout: timeout_handle = self.io_loop.add_timeout( self.io_loop.time() + timeout, functools.partial(self._on_timeout, key, "in request queue"), ) self.waiting[key] = (request, callback, timeout_handle) self._process_queue() if self.queue: gen_log.debug( "max_clients limit reached, request queued. " "%d active, %d queued requests." % (len(self.active), len(self.queue)) ) def _process_queue(self) -> None: while self.queue and len(self.active) < self.max_clients: key, request, callback = self.queue.popleft() if key not in self.waiting: continue self._remove_timeout(key) self.active[key] = (request, callback) release_callback = functools.partial(self._release_fetch, key) self._handle_request(request, release_callback, callback) def _connection_class(self) -> type: return _HTTPConnection def _handle_request( self, request: HTTPRequest, release_callback: Callable[[], None], final_callback: Callable[[HTTPResponse], None], ) -> None: self._connection_class()( self, request, release_callback, final_callback, self.max_buffer_size, self.tcp_client, self.max_header_size, self.max_body_size, ) def _release_fetch(self, key: object) -> None: del self.active[key] self._process_queue() def _remove_timeout(self, key: object) -> None: if key in self.waiting: request, callback, timeout_handle = self.waiting[key] if timeout_handle is not None: self.io_loop.remove_timeout(timeout_handle) del self.waiting[key] def _on_timeout(self, key: object, info: Optional[str] = None) -> None: """Timeout callback of request. 
Construct a timeout HTTPResponse when a timeout occurs. :arg object key: A simple object to mark the request. :info string key: More detailed timeout information. """ request, callback, timeout_handle = self.waiting[key] self.queue.remove((key, request, callback)) error_message = "Timeout {0}".format(info) if info else "Timeout" timeout_response = HTTPResponse( request, 599, error=HTTPTimeoutError(error_message), request_time=self.io_loop.time() - request.start_time, ) self.io_loop.add_callback(callback, timeout_response) del self.waiting[key] class _HTTPConnection(httputil.HTTPMessageDelegate): _SUPPORTED_METHODS = set( ["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"] ) def __init__( self, client: Optional[SimpleAsyncHTTPClient], request: HTTPRequest, release_callback: Callable[[], None], final_callback: Callable[[HTTPResponse], None], max_buffer_size: int, tcp_client: TCPClient, max_header_size: int, max_body_size: int, ) -> None: self.io_loop = IOLoop.current() self.start_time = self.io_loop.time() self.start_wall_time = time.time() self.client = client self.request = request self.release_callback = release_callback self.final_callback = final_callback self.max_buffer_size = max_buffer_size self.tcp_client = tcp_client self.max_header_size = max_header_size self.max_body_size = max_body_size self.code = None # type: Optional[int] self.headers = None # type: Optional[httputil.HTTPHeaders] self.chunks = [] # type: List[bytes] self._decompressor = None # Timeout handle returned by IOLoop.add_timeout self._timeout = None # type: object self._sockaddr = None IOLoop.current().add_future( gen.convert_yielded(self.run()), lambda f: f.result() ) async def run(self) -> None: try: self.parsed = urllib.parse.urlsplit(_unicode(self.request.url)) if self.parsed.scheme not in ("http", "https"): raise ValueError("Unsupported url scheme: %s" % self.request.url) # urlsplit results have hostname and port results, but they # didn't support ipv6 literals until python 2.7. netloc = self.parsed.netloc if "@" in netloc: userpass, _, netloc = netloc.rpartition("@") host, port = httputil.split_host_and_port(netloc) if port is None: port = 443 if self.parsed.scheme == "https" else 80 if re.match(r"^\[.*\]$", host): # raw ipv6 addresses in urls are enclosed in brackets host = host[1:-1] self.parsed_hostname = host # save final host for _on_connect if self.request.allow_ipv6 is False: af = socket.AF_INET else: af = socket.AF_UNSPEC ssl_options = self._get_ssl_options(self.parsed.scheme) source_ip = None if self.request.network_interface: if is_valid_ip(self.request.network_interface): source_ip = self.request.network_interface else: raise ValueError( "Unrecognized IPv4 or IPv6 address for network_interface, got %r" % (self.request.network_interface,) ) timeout = ( min(self.request.connect_timeout, self.request.request_timeout) or self.request.connect_timeout or self.request.request_timeout ) # min but skip zero if timeout: self._timeout = self.io_loop.add_timeout( self.start_time + timeout, functools.partial(self._on_timeout, "while connecting"), ) stream = await self.tcp_client.connect( host, port, af=af, ssl_options=ssl_options, max_buffer_size=self.max_buffer_size, source_ip=source_ip, ) if self.final_callback is None: # final_callback is cleared if we've hit our timeout. 
stream.close() return self.stream = stream self.stream.set_close_callback(self.on_connection_close) self._remove_timeout() if self.final_callback is None: return if self.request.request_timeout: self._timeout = self.io_loop.add_timeout( self.start_time + self.request.request_timeout, functools.partial(self._on_timeout, "during request"), ) if ( self.request.method not in self._SUPPORTED_METHODS and not self.request.allow_nonstandard_methods ): raise KeyError("unknown method %s" % self.request.method) for key in ( "proxy_host", "proxy_port", "proxy_username", "proxy_password", "proxy_auth_mode", ): if getattr(self.request, key, None): raise NotImplementedError("%s not supported" % key) if "Connection" not in self.request.headers: self.request.headers["Connection"] = "close" if "Host" not in self.request.headers: if "@" in self.parsed.netloc: self.request.headers["Host"] = self.parsed.netloc.rpartition("@")[ -1 ] else: self.request.headers["Host"] = self.parsed.netloc username, password = None, None if self.parsed.username is not None: username, password = self.parsed.username, self.parsed.password elif self.request.auth_username is not None: username = self.request.auth_username password = self.request.auth_password or "" if username is not None: assert password is not None if self.request.auth_mode not in (None, "basic"): raise ValueError("unsupported auth_mode %s", self.request.auth_mode) self.request.headers["Authorization"] = "Basic " + _unicode( base64.b64encode( httputil.encode_username_password(username, password) ) ) if self.request.user_agent: self.request.headers["User-Agent"] = self.request.user_agent elif self.request.headers.get("User-Agent") is None: self.request.headers["User-Agent"] = "Tornado/{}".format(version) if not self.request.allow_nonstandard_methods: # Some HTTP methods nearly always have bodies while others # almost never do. Fail in this case unless the user has # opted out of sanity checks with allow_nonstandard_methods. body_expected = self.request.method in ("POST", "PATCH", "PUT") body_present = ( self.request.body is not None or self.request.body_producer is not None ) if (body_expected and not body_present) or ( body_present and not body_expected ): raise ValueError( "Body must %sbe None for method %s (unless " "allow_nonstandard_methods is true)" % ("not " if body_expected else "", self.request.method) ) if self.request.expect_100_continue: self.request.headers["Expect"] = "100-continue" if self.request.body is not None: # When body_producer is used the caller is responsible for # setting Content-Length (or else chunked encoding will be used). self.request.headers["Content-Length"] = str(len(self.request.body)) if ( self.request.method == "POST" and "Content-Type" not in self.request.headers ): self.request.headers[ "Content-Type" ] = "application/x-www-form-urlencoded" if self.request.decompress_response: self.request.headers["Accept-Encoding"] = "gzip" req_path = (self.parsed.path or "/") + ( ("?" 
+ self.parsed.query) if self.parsed.query else "" ) self.connection = self._create_connection(stream) start_line = httputil.RequestStartLine(self.request.method, req_path, "") self.connection.write_headers(start_line, self.request.headers) if self.request.expect_100_continue: await self.connection.read_response(self) else: await self._write_body(True) except Exception: if not self._handle_exception(*sys.exc_info()): raise def _get_ssl_options( self, scheme: str ) -> Union[None, Dict[str, Any], ssl.SSLContext]: if scheme == "https": if self.request.ssl_options is not None: return self.request.ssl_options # If we are using the defaults, don't construct a # new SSLContext. if ( self.request.validate_cert and self.request.ca_certs is None and self.request.client_cert is None and self.request.client_key is None ): return _client_ssl_defaults ssl_ctx = ssl.create_default_context( ssl.Purpose.SERVER_AUTH, cafile=self.request.ca_certs ) if not self.request.validate_cert: ssl_ctx.check_hostname = False ssl_ctx.verify_mode = ssl.CERT_NONE if self.request.client_cert is not None: ssl_ctx.load_cert_chain( self.request.client_cert, self.request.client_key ) if hasattr(ssl, "OP_NO_COMPRESSION"): # See netutil.ssl_options_to_context ssl_ctx.options |= ssl.OP_NO_COMPRESSION return ssl_ctx return None def _on_timeout(self, info: Optional[str] = None) -> None: """Timeout callback of _HTTPConnection instance. Raise a `HTTPTimeoutError` when a timeout occurs. :info string key: More detailed timeout information. """ self._timeout = None error_message = "Timeout {0}".format(info) if info else "Timeout" if self.final_callback is not None: self._handle_exception( HTTPTimeoutError, HTTPTimeoutError(error_message), None ) def _remove_timeout(self) -> None: if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None def _create_connection(self, stream: IOStream) -> HTTP1Connection: stream.set_nodelay(True) connection = HTTP1Connection( stream, True, HTTP1ConnectionParameters( no_keep_alive=True, max_header_size=self.max_header_size, max_body_size=self.max_body_size, decompress=bool(self.request.decompress_response), ), self._sockaddr, ) return connection async def _write_body(self, start_read: bool) -> None: if self.request.body is not None: self.connection.write(self.request.body) elif self.request.body_producer is not None: fut = self.request.body_producer(self.connection.write) if fut is not None: await fut self.connection.finish() if start_read: try: await self.connection.read_response(self) except StreamClosedError: if not self._handle_exception(*sys.exc_info()): raise def _release(self) -> None: if self.release_callback is not None: release_callback = self.release_callback self.release_callback = None # type: ignore release_callback() def _run_callback(self, response: HTTPResponse) -> None: self._release() if self.final_callback is not None: final_callback = self.final_callback self.final_callback = None # type: ignore self.io_loop.add_callback(final_callback, response) def _handle_exception( self, typ: "Optional[Type[BaseException]]", value: Optional[BaseException], tb: Optional[TracebackType], ) -> bool: if self.final_callback: self._remove_timeout() if isinstance(value, StreamClosedError): if value.real_error is None: value = HTTPStreamClosedError("Stream closed") else: value = value.real_error self._run_callback( HTTPResponse( self.request, 599, error=value, request_time=self.io_loop.time() - self.start_time, start_time=self.start_wall_time, ) ) if hasattr(self, "stream"): 
# TODO: this may cause a StreamClosedError to be raised # by the connection's Future. Should we cancel the # connection more gracefully? self.stream.close() return True else: # If our callback has already been called, we are probably # catching an exception that is not caused by us but rather # some child of our callback. Rather than drop it on the floor, # pass it along, unless it's just the stream being closed. return isinstance(value, StreamClosedError) def on_connection_close(self) -> None: if self.final_callback is not None: message = "Connection closed" if self.stream.error: raise self.stream.error try: raise HTTPStreamClosedError(message) except HTTPStreamClosedError: self._handle_exception(*sys.exc_info()) async def headers_received( self, first_line: Union[httputil.ResponseStartLine, httputil.RequestStartLine], headers: httputil.HTTPHeaders, ) -> None: assert isinstance(first_line, httputil.ResponseStartLine) if self.request.expect_100_continue and first_line.code == 100: await self._write_body(False) return self.code = first_line.code self.reason = first_line.reason self.headers = headers if self._should_follow_redirect(): return if self.request.header_callback is not None: # Reassemble the start line. self.request.header_callback("%s %s %s\r\n" % first_line) for k, v in self.headers.get_all(): self.request.header_callback("%s: %s\r\n" % (k, v)) self.request.header_callback("\r\n") def _should_follow_redirect(self) -> bool: if self.request.follow_redirects: assert self.request.max_redirects is not None return ( self.code in (301, 302, 303, 307, 308) and self.request.max_redirects > 0 and self.headers is not None and self.headers.get("Location") is not None ) return False def finish(self) -> None: assert self.code is not None data = b"".join(self.chunks) self._remove_timeout() original_request = getattr(self.request, "original_request", self.request) if self._should_follow_redirect(): assert isinstance(self.request, _RequestProxy) new_request = copy.copy(self.request.request) new_request.url = urllib.parse.urljoin( self.request.url, self.headers["Location"] ) new_request.max_redirects = self.request.max_redirects - 1 del new_request.headers["Host"] # https://tools.ietf.org/html/rfc7231#section-6.4 # # The original HTTP spec said that after a 301 or 302 # redirect, the request method should be preserved. # However, browsers implemented this by changing the # method to GET, and the behavior stuck. 303 redirects # always specified this POST-to-GET behavior, arguably # for *all* methods, but libcurl < 7.70 only does this # for POST, while libcurl >= 7.70 does it for other methods. if (self.code == 303 and self.request.method != "HEAD") or ( self.code in (301, 302) and self.request.method == "POST" ): new_request.method = "GET" new_request.body = None for h in [ "Content-Length", "Content-Type", "Content-Encoding", "Transfer-Encoding", ]: try: del self.request.headers[h] except KeyError: pass new_request.original_request = original_request final_callback = self.final_callback self.final_callback = None self._release() fut = self.client.fetch(new_request, raise_error=False) fut.add_done_callback(lambda f: final_callback(f.result())) self._on_end_request() return if self.request.streaming_callback: buffer = BytesIO() else: buffer = BytesIO(data) # TODO: don't require one big string? 
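# With a streaming_callback the body has already been delivered chunk by
# chunk in data_received, so the HTTPResponse is built around an empty
# buffer; otherwise the accumulated chunks are returned as a single body.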
response = HTTPResponse( original_request, self.code, reason=getattr(self, "reason", None), headers=self.headers, request_time=self.io_loop.time() - self.start_time, start_time=self.start_wall_time, buffer=buffer, effective_url=self.request.url, ) self._run_callback(response) self._on_end_request() def _on_end_request(self) -> None: self.stream.close() def data_received(self, chunk: bytes) -> None: if self._should_follow_redirect(): # We're going to follow a redirect so just discard the body. return if self.request.streaming_callback is not None: self.request.streaming_callback(chunk) else: self.chunks.append(chunk) if __name__ == "__main__": AsyncHTTPClient.configure(SimpleAsyncHTTPClient) main() tornado-6.1.0/tornado/speedups.c000066400000000000000000000027641374705040500166440ustar00rootroot00000000000000#define PY_SSIZE_T_CLEAN #include #include static PyObject* websocket_mask(PyObject* self, PyObject* args) { const char* mask; Py_ssize_t mask_len; uint32_t uint32_mask; uint64_t uint64_mask; const char* data; Py_ssize_t data_len; Py_ssize_t i; PyObject* result; char* buf; if (!PyArg_ParseTuple(args, "s#s#", &mask, &mask_len, &data, &data_len)) { return NULL; } uint32_mask = ((uint32_t*)mask)[0]; result = PyBytes_FromStringAndSize(NULL, data_len); if (!result) { return NULL; } buf = PyBytes_AsString(result); if (sizeof(size_t) >= 8) { uint64_mask = uint32_mask; uint64_mask = (uint64_mask << 32) | uint32_mask; while (data_len >= 8) { ((uint64_t*)buf)[0] = ((uint64_t*)data)[0] ^ uint64_mask; data += 8; buf += 8; data_len -= 8; } } while (data_len >= 4) { ((uint32_t*)buf)[0] = ((uint32_t*)data)[0] ^ uint32_mask; data += 4; buf += 4; data_len -= 4; } for (i = 0; i < data_len; i++) { buf[i] = data[i] ^ mask[i]; } return result; } static PyMethodDef methods[] = { {"websocket_mask", websocket_mask, METH_VARARGS, ""}, {NULL, NULL, 0, NULL} }; static struct PyModuleDef speedupsmodule = { PyModuleDef_HEAD_INIT, "speedups", NULL, -1, methods }; PyMODINIT_FUNC PyInit_speedups(void) { return PyModule_Create(&speedupsmodule); } tornado-6.1.0/tornado/speedups.pyi000066400000000000000000000000731374705040500172120ustar00rootroot00000000000000def websocket_mask(mask: bytes, data: bytes) -> bytes: ... tornado-6.1.0/tornado/tcpclient.py000066400000000000000000000274541374705040500172120ustar00rootroot00000000000000# # Copyright 2014 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking TCP connection factory. """ import functools import socket import numbers import datetime import ssl from tornado.concurrent import Future, future_add_done_callback from tornado.ioloop import IOLoop from tornado.iostream import IOStream from tornado import gen from tornado.netutil import Resolver from tornado.gen import TimeoutError from typing import Any, Union, Dict, Tuple, List, Callable, Iterator, Optional, Set _INITIAL_CONNECT_TIMEOUT = 0.3 class _Connector(object): """A stateless implementation of the "Happy Eyeballs" algorithm. 
"Happy Eyeballs" is documented in RFC6555 as the recommended practice for when both IPv4 and IPv6 addresses are available. In this implementation, we partition the addresses by family, and make the first connection attempt to whichever address was returned first by ``getaddrinfo``. If that connection fails or times out, we begin a connection in parallel to the first address of the other family. If there are additional failures we retry with other addresses, keeping one connection attempt per family in flight at a time. http://tools.ietf.org/html/rfc6555 """ def __init__( self, addrinfo: List[Tuple], connect: Callable[ [socket.AddressFamily, Tuple], Tuple[IOStream, "Future[IOStream]"] ], ) -> None: self.io_loop = IOLoop.current() self.connect = connect self.future = ( Future() ) # type: Future[Tuple[socket.AddressFamily, Any, IOStream]] self.timeout = None # type: Optional[object] self.connect_timeout = None # type: Optional[object] self.last_error = None # type: Optional[Exception] self.remaining = len(addrinfo) self.primary_addrs, self.secondary_addrs = self.split(addrinfo) self.streams = set() # type: Set[IOStream] @staticmethod def split( addrinfo: List[Tuple], ) -> Tuple[ List[Tuple[socket.AddressFamily, Tuple]], List[Tuple[socket.AddressFamily, Tuple]], ]: """Partition the ``addrinfo`` list by address family. Returns two lists. The first list contains the first entry from ``addrinfo`` and all others with the same family, and the second list contains all other addresses (normally one list will be AF_INET and the other AF_INET6, although non-standard resolvers may return additional families). """ primary = [] secondary = [] primary_af = addrinfo[0][0] for af, addr in addrinfo: if af == primary_af: primary.append((af, addr)) else: secondary.append((af, addr)) return primary, secondary def start( self, timeout: float = _INITIAL_CONNECT_TIMEOUT, connect_timeout: Optional[Union[float, datetime.timedelta]] = None, ) -> "Future[Tuple[socket.AddressFamily, Any, IOStream]]": self.try_connect(iter(self.primary_addrs)) self.set_timeout(timeout) if connect_timeout is not None: self.set_connect_timeout(connect_timeout) return self.future def try_connect(self, addrs: Iterator[Tuple[socket.AddressFamily, Tuple]]) -> None: try: af, addr = next(addrs) except StopIteration: # We've reached the end of our queue, but the other queue # might still be working. Send a final error on the future # only when both queues are finished. if self.remaining == 0 and not self.future.done(): self.future.set_exception( self.last_error or IOError("connection failed") ) return stream, future = self.connect(af, addr) self.streams.add(stream) future_add_done_callback( future, functools.partial(self.on_connect_done, addrs, af, addr) ) def on_connect_done( self, addrs: Iterator[Tuple[socket.AddressFamily, Tuple]], af: socket.AddressFamily, addr: Tuple, future: "Future[IOStream]", ) -> None: self.remaining -= 1 try: stream = future.result() except Exception as e: if self.future.done(): return # Error: try again (but remember what happened so we have an # error to raise in the end) self.last_error = e self.try_connect(addrs) if self.timeout is not None: # If the first attempt failed, don't wait for the # timeout to try an address from the secondary queue. self.io_loop.remove_timeout(self.timeout) self.on_timeout() return self.clear_timeouts() if self.future.done(): # This is a late arrival; just drop it. 
stream.close() else: self.streams.discard(stream) self.future.set_result((af, addr, stream)) self.close_streams() def set_timeout(self, timeout: float) -> None: self.timeout = self.io_loop.add_timeout( self.io_loop.time() + timeout, self.on_timeout ) def on_timeout(self) -> None: self.timeout = None if not self.future.done(): self.try_connect(iter(self.secondary_addrs)) def clear_timeout(self) -> None: if self.timeout is not None: self.io_loop.remove_timeout(self.timeout) def set_connect_timeout( self, connect_timeout: Union[float, datetime.timedelta] ) -> None: self.connect_timeout = self.io_loop.add_timeout( connect_timeout, self.on_connect_timeout ) def on_connect_timeout(self) -> None: if not self.future.done(): self.future.set_exception(TimeoutError()) self.close_streams() def clear_timeouts(self) -> None: if self.timeout is not None: self.io_loop.remove_timeout(self.timeout) if self.connect_timeout is not None: self.io_loop.remove_timeout(self.connect_timeout) def close_streams(self) -> None: for stream in self.streams: stream.close() class TCPClient(object): """A non-blocking TCP connection factory. .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. """ def __init__(self, resolver: Optional[Resolver] = None) -> None: if resolver is not None: self.resolver = resolver self._own_resolver = False else: self.resolver = Resolver() self._own_resolver = True def close(self) -> None: if self._own_resolver: self.resolver.close() async def connect( self, host: str, port: int, af: socket.AddressFamily = socket.AF_UNSPEC, ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None, max_buffer_size: Optional[int] = None, source_ip: Optional[str] = None, source_port: Optional[int] = None, timeout: Optional[Union[float, datetime.timedelta]] = None, ) -> IOStream: """Connect to the given host and port. Asynchronously returns an `.IOStream` (or `.SSLIOStream` if ``ssl_options`` is not None). Using the ``source_ip`` kwarg, one can specify the source IP address to use when establishing the connection. In case the user needs to resolve and use a specific interface, it has to be handled outside of Tornado as this depends very much on the platform. Raises `TimeoutError` if the input future does not complete before ``timeout``, which may be specified in any form allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time relative to `.IOLoop.time`) Similarly, when the user requires a certain source port, it can be specified using the ``source_port`` arg. .. versionchanged:: 4.5 Added the ``source_ip`` and ``source_port`` arguments. .. versionchanged:: 5.0 Added the ``timeout`` argument. """ if timeout is not None: if isinstance(timeout, numbers.Real): timeout = IOLoop.current().time() + timeout elif isinstance(timeout, datetime.timedelta): timeout = IOLoop.current().time() + timeout.total_seconds() else: raise TypeError("Unsupported timeout %r" % timeout) if timeout is not None: addrinfo = await gen.with_timeout( timeout, self.resolver.resolve(host, port, af) ) else: addrinfo = await self.resolver.resolve(host, port, af) connector = _Connector( addrinfo, functools.partial( self._create_stream, max_buffer_size, source_ip=source_ip, source_port=source_port, ), ) af, addr, stream = await connector.start(connect_timeout=timeout) # TODO: For better performance we could cache the (af, addr) # information here and re-use it on subsequent connections to # the same host. 
(http://tools.ietf.org/html/rfc6555#section-4.2) if ssl_options is not None: if timeout is not None: stream = await gen.with_timeout( timeout, stream.start_tls( False, ssl_options=ssl_options, server_hostname=host ), ) else: stream = await stream.start_tls( False, ssl_options=ssl_options, server_hostname=host ) return stream def _create_stream( self, max_buffer_size: int, af: socket.AddressFamily, addr: Tuple, source_ip: Optional[str] = None, source_port: Optional[int] = None, ) -> Tuple[IOStream, "Future[IOStream]"]: # Always connect in plaintext; we'll convert to ssl if necessary # after one connection has completed. source_port_bind = source_port if isinstance(source_port, int) else 0 source_ip_bind = source_ip if source_port_bind and not source_ip: # User required a specific port, but did not specify # a certain source IP, will bind to the default loopback. source_ip_bind = "::1" if af == socket.AF_INET6 else "127.0.0.1" # Trying to use the same address family as the requested af socket: # - 127.0.0.1 for IPv4 # - ::1 for IPv6 socket_obj = socket.socket(af) if source_port_bind or source_ip_bind: # If the user requires binding also to a specific IP/port. try: socket_obj.bind((source_ip_bind, source_port_bind)) except socket.error: socket_obj.close() # Fail loudly if unable to use the IP/port. raise try: stream = IOStream(socket_obj, max_buffer_size=max_buffer_size) except socket.error as e: fu = Future() # type: Future[IOStream] fu.set_exception(e) return stream, fu else: return stream, stream.connect(addr) tornado-6.1.0/tornado/tcpserver.py000066400000000000000000000316721374705040500172370ustar00rootroot00000000000000# # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking, single-threaded TCP server.""" import errno import os import socket import ssl from tornado import gen from tornado.log import app_log from tornado.ioloop import IOLoop from tornado.iostream import IOStream, SSLIOStream from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket from tornado import process from tornado.util import errno_from_exception import typing from typing import Union, Dict, Any, Iterable, Optional, Awaitable if typing.TYPE_CHECKING: from typing import Callable, List # noqa: F401 class TCPServer(object): r"""A non-blocking, single-threaded TCP server. To use `TCPServer`, define a subclass which overrides the `handle_stream` method. For example, a simple echo server could be defined like this:: from tornado.tcpserver import TCPServer from tornado.iostream import StreamClosedError from tornado import gen class EchoServer(TCPServer): async def handle_stream(self, stream, address): while True: try: data = await stream.read_until(b"\n") await stream.write(data) except StreamClosedError: break To make this server serve SSL traffic, send the ``ssl_options`` keyword argument with an `ssl.SSLContext` object. 
For compatibility with older versions of Python ``ssl_options`` may also be a dictionary of keyword arguments for the `ssl.wrap_socket` method.:: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), os.path.join(data_dir, "mydomain.key")) TCPServer(ssl_options=ssl_ctx) `TCPServer` initialization follows one of three patterns: 1. `listen`: simple single-process:: server = TCPServer() server.listen(8888) IOLoop.current().start() 2. `bind`/`start`: simple multi-process:: server = TCPServer() server.bind(8888) server.start(0) # Forks multiple sub-processes IOLoop.current().start() When using this interface, an `.IOLoop` must *not* be passed to the `TCPServer` constructor. `start` will always start the server on the default singleton `.IOLoop`. 3. `add_sockets`: advanced multi-process:: sockets = bind_sockets(8888) tornado.process.fork_processes(0) server = TCPServer() server.add_sockets(sockets) IOLoop.current().start() The `add_sockets` interface is more complicated, but it can be used with `tornado.process.fork_processes` to give you more flexibility in when the fork happens. `add_sockets` can also be used in single-process servers if you want to create your listening sockets in some way other than `~tornado.netutil.bind_sockets`. .. versionadded:: 3.1 The ``max_buffer_size`` argument. .. versionchanged:: 5.0 The ``io_loop`` argument has been removed. """ def __init__( self, ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None, max_buffer_size: Optional[int] = None, read_chunk_size: Optional[int] = None, ) -> None: self.ssl_options = ssl_options self._sockets = {} # type: Dict[int, socket.socket] self._handlers = {} # type: Dict[int, Callable[[], None]] self._pending_sockets = [] # type: List[socket.socket] self._started = False self._stopped = False self.max_buffer_size = max_buffer_size self.read_chunk_size = read_chunk_size # Verify the SSL options. Otherwise we don't get errors until clients # connect. This doesn't verify that the keys are legitimate, but # the SSL module doesn't do that until there is a connected socket # which seems like too much work if self.ssl_options is not None and isinstance(self.ssl_options, dict): # Only certfile is required: it can contain both keys if "certfile" not in self.ssl_options: raise KeyError('missing key "certfile" in ssl_options') if not os.path.exists(self.ssl_options["certfile"]): raise ValueError( 'certfile "%s" does not exist' % self.ssl_options["certfile"] ) if "keyfile" in self.ssl_options and not os.path.exists( self.ssl_options["keyfile"] ): raise ValueError( 'keyfile "%s" does not exist' % self.ssl_options["keyfile"] ) def listen(self, port: int, address: str = "") -> None: """Starts accepting connections on the given port. This method may be called more than once to listen on multiple ports. `listen` takes effect immediately; it is not necessary to call `TCPServer.start` afterwards. It is, however, necessary to start the `.IOLoop`. """ sockets = bind_sockets(port, address=address) self.add_sockets(sockets) def add_sockets(self, sockets: Iterable[socket.socket]) -> None: """Makes this server start accepting connections on the given sockets. The ``sockets`` parameter is a list of socket objects such as those returned by `~tornado.netutil.bind_sockets`. `add_sockets` is typically used in combination with that method and `tornado.process.fork_processes` to provide greater control over the initialization of a multi-process server. 
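
Unlike `listen`, this method does not create any sockets itself; it only
registers accept handlers on the sockets it is given.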
""" for sock in sockets: self._sockets[sock.fileno()] = sock self._handlers[sock.fileno()] = add_accept_handler( sock, self._handle_connection ) def add_socket(self, socket: socket.socket) -> None: """Singular version of `add_sockets`. Takes a single socket object.""" self.add_sockets([socket]) def bind( self, port: int, address: Optional[str] = None, family: socket.AddressFamily = socket.AF_UNSPEC, backlog: int = 128, reuse_port: bool = False, ) -> None: """Binds this server to the given port on the given address. To start the server, call `start`. If you want to run this server in a single process, you can call `listen` as a shortcut to the sequence of `bind` and `start` calls. Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen `. The ``reuse_port`` argument has the same meaning as for `.bind_sockets`. This method may be called multiple times prior to `start` to listen on multiple ports or interfaces. .. versionchanged:: 4.4 Added the ``reuse_port`` argument. """ sockets = bind_sockets( port, address=address, family=family, backlog=backlog, reuse_port=reuse_port ) if self._started: self.add_sockets(sockets) else: self._pending_sockets.extend(sockets) def start( self, num_processes: Optional[int] = 1, max_restarts: Optional[int] = None ) -> None: """Starts this server in the `.IOLoop`. By default, we run the server in this process and do not fork any additional child process. If num_processes is ``None`` or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If num_processes is given and > 1, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``TCPServer.start(n)``. Values of ``num_processes`` other than 1 are not supported on Windows. The ``max_restarts`` argument is passed to `.fork_processes`. .. versionchanged:: 6.0 Added ``max_restarts`` argument. """ assert not self._started self._started = True if num_processes != 1: process.fork_processes(num_processes, max_restarts) sockets = self._pending_sockets self._pending_sockets = [] self.add_sockets(sockets) def stop(self) -> None: """Stops listening for new connections. Requests currently in progress may still continue after the server is stopped. """ if self._stopped: return self._stopped = True for fd, sock in self._sockets.items(): assert sock.fileno() == fd # Unregister socket from IOLoop self._handlers.pop(fd)() sock.close() def handle_stream( self, stream: IOStream, address: tuple ) -> Optional[Awaitable[None]]: """Override to handle a new `.IOStream` from an incoming connection. This method may be a coroutine; if so any exceptions it raises asynchronously will be logged. Accepting of incoming connections will not be blocked by this coroutine. 
If this `TCPServer` is configured for SSL, ``handle_stream`` may be called before the SSL handshake has completed. Use `.SSLIOStream.wait_for_handshake` if you need to verify the client's certificate or use NPN/ALPN. .. versionchanged:: 4.2 Added the option for this method to be a coroutine. """ raise NotImplementedError() def _handle_connection(self, connection: socket.socket, address: Any) -> None: if self.ssl_options is not None: assert ssl, "Python 2.6+ and OpenSSL required for SSL" try: connection = ssl_wrap_socket( connection, self.ssl_options, server_side=True, do_handshake_on_connect=False, ) except ssl.SSLError as err: if err.args[0] == ssl.SSL_ERROR_EOF: return connection.close() else: raise except socket.error as err: # If the connection is closed immediately after it is created # (as in a port scan), we can get one of several errors. # wrap_socket makes an internal call to getpeername, # which may return either EINVAL (Mac OS X) or ENOTCONN # (Linux). If it returns ENOTCONN, this error is # silently swallowed by the ssl module, so we need to # catch another error later on (AttributeError in # SSLIOStream._do_ssl_handshake). # To test this behavior, try nmap with the -sT flag. # https://github.com/tornadoweb/tornado/pull/750 if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL): return connection.close() else: raise try: if self.ssl_options is not None: stream = SSLIOStream( connection, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size, ) # type: IOStream else: stream = IOStream( connection, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size, ) future = self.handle_stream(stream, address) if future is not None: IOLoop.current().add_future( gen.convert_yielded(future), lambda f: f.result() ) except Exception: app_log.error("Error in connection callback", exc_info=True) tornado-6.1.0/tornado/template.py000066400000000000000000001116461374705040500170350ustar00rootroot00000000000000# # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A simple template system that compiles templates to Python code. Basic usage looks like:: t = template.Template("{{ myvalue }}") print(t.generate(myvalue="XXX")) `Loader` is a class that loads templates from a root directory and caches the compiled templates:: loader = template.Loader("/home/btaylor") print(loader.load("test.html").generate(myvalue="XXX")) We compile all templates to raw Python. Error-reporting is currently... uh, interesting. Syntax for the templates:: ### base.html {% block title %}Default title{% end %}
    <ul>
      {% for student in students %}
        {% block student %}
          <li>{{ escape(student.name) }}</li>
        {% end %}
      {% end %}
    </ul>
    ### bold.html
    {% extends "base.html" %}

    {% block title %}A bolder title{% end %}

    {% block student %}
      <li><span style="bold">{{ escape(student.name) }}</span></li>
    {% end %}

Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks
get translated exactly into Python, so you can do complex expressions
like::

    {% for student in [p for p in people if p.student and p.age > 23] %}
      <li>{{ escape(student.name) }}</li>
    {% end %}

Translating directly to Python means you can apply functions to
expressions easily, like the ``escape()`` function in the examples above.
You can pass functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::

    ### Python code
    def add(x, y):
        return x + y
    template.execute(add=add)

    ### The template
    {{ add(1, 2) }}

We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.

Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of `tornado.web.RequestHandler`,
which load templates automatically based on the ``template_path``
`.Application` setting.

Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.

Syntax Reference
----------------

Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``.

To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.

To include a literal ``{{``, ``{%``, or ``{#`` in the output, escape them as
``{{!``, ``{%!``, and ``{#!``, respectively.


``{% apply *function* %}...{% end %}``
    Applies a function to the output of all template code between ``apply``
    and ``end``::

        {% apply linkify %}{{name}} said: {{message}}{% end %}

    Note that as an implementation detail apply blocks are implemented as
    nested functions and thus may interact strangely with variables set via
    ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
    within loops.

``{% autoescape *function* %}``
    Sets the autoescape mode for the current file. This does not affect
    other files, even those referenced by ``{% include %}``. Note that
    autoescaping can also be configured globally, at the `.Application`
    or `Loader`.::

        {% autoescape xhtml_escape %}
        {% autoescape None %}

``{% block *name* %}...{% end %}``
    Indicates a named, replaceable block for use with ``{% extends %}``.
    Blocks in the parent template will be replaced with the contents of
    the same-named block in a child template.::

        <!-- base.html -->
        <title>{% block title %}Default title{% end %}</title>

        <!-- mypage.html -->
        {% extends "base.html" %}
        {% block title %}My page title{% end %}

``{% comment ... %}``
    A comment which will be removed from the template output. Note that
    there is no ``{% end %}`` tag; the comment goes from the word
    ``comment`` to the closing ``%}`` tag.

``{% extends *filename* %}``
    Inherit from another template. Templates that use ``extends`` should
    contain one or more ``block`` tags to replace content from the parent
    template. Anything in the child template not contained in a ``block``
    tag will be ignored. For an example, see the ``{% block %}`` tag.

``{% for *var* in *expr* %}...{% end %}``
    Same as the python ``for`` statement. ``{% break %}`` and
    ``{% continue %}`` may be used inside the loop.

``{% from *x* import *y* %}``
    Same as the python ``import`` statement.

``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
    Conditional statement - outputs the first section whose condition is
    true. (The ``elif`` and ``else`` sections are optional)

``{% import *module* %}``
    Same as the python ``import`` statement.

``{% include *filename* %}``
    Includes another template file.
The included file can see all the local variables as if it were copied directly to the point of the ``include`` directive (the ``{% autoescape %}`` directive is an exception). Alternately, ``{% module Template(filename, **kwargs) %}`` may be used to include another template with an isolated namespace. ``{% module *expr* %}`` Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is not escaped:: {% module Template("foo.html", arg=42) %} ``UIModules`` are a feature of the `tornado.web.RequestHandler` class (and specifically its ``render`` method) and will not work when the template system is used on its own in other contexts. ``{% raw *expr* %}`` Outputs the result of the given expression without autoescaping. ``{% set *x* = *y* %}`` Sets a local variable. ``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}`` Same as the python ``try`` statement. ``{% while *condition* %}... {% end %}`` Same as the python ``while`` statement. ``{% break %}`` and ``{% continue %}`` may be used inside the loop. ``{% whitespace *mode* %}`` Sets the whitespace mode for the remainder of the current file (or until the next ``{% whitespace %}`` directive). See `filter_whitespace` for available options. New in Tornado 4.3. """ import datetime from io import StringIO import linecache import os.path import posixpath import re import threading from tornado import escape from tornado.log import app_log from tornado.util import ObjectDict, exec_in, unicode_type from typing import Any, Union, Callable, List, Dict, Iterable, Optional, TextIO import typing if typing.TYPE_CHECKING: from typing import Tuple, ContextManager # noqa: F401 _DEFAULT_AUTOESCAPE = "xhtml_escape" class _UnsetMarker: pass _UNSET = _UnsetMarker() def filter_whitespace(mode: str, text: str) -> str: """Transform whitespace in ``text`` according to ``mode``. Available modes are: * ``all``: Return all whitespace unmodified. * ``single``: Collapse consecutive whitespace with a single whitespace character, preserving newlines. * ``oneline``: Collapse all runs of whitespace into a single space character, removing all newlines in the process. .. versionadded:: 4.3 """ if mode == "all": return text elif mode == "single": text = re.sub(r"([\t ]+)", " ", text) text = re.sub(r"(\s*\n\s*)", "\n", text) return text elif mode == "oneline": return re.sub(r"(\s+)", " ", text) else: raise Exception("invalid whitespace mode %s" % mode) class Template(object): """A compiled template. We compile into Python from the given template_string. You can generate the template from variables with generate(). """ # note that the constructor's signature is not extracted with # autodoc because _UNSET looks like garbage. When changing # this signature update website/sphinx/template.rst too. def __init__( self, template_string: Union[str, bytes], name: str = "", loader: Optional["BaseLoader"] = None, compress_whitespace: Union[bool, _UnsetMarker] = _UNSET, autoescape: Optional[Union[str, _UnsetMarker]] = _UNSET, whitespace: Optional[str] = None, ) -> None: """Construct a Template. :arg str template_string: the contents of the template file. :arg str name: the filename from which the template was loaded (used for error message). :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template, used to resolve ``{% include %}`` and ``{% extend %}`` directives. :arg bool compress_whitespace: Deprecated since Tornado 4.3. Equivalent to ``whitespace="single"`` if true and ``whitespace="all"`` if false. 
:arg str autoescape: The name of a function in the template namespace, or ``None`` to disable escaping by default. :arg str whitespace: A string specifying treatment of whitespace; see `filter_whitespace` for options. .. versionchanged:: 4.3 Added ``whitespace`` parameter; deprecated ``compress_whitespace``. """ self.name = escape.native_str(name) if compress_whitespace is not _UNSET: # Convert deprecated compress_whitespace (bool) to whitespace (str). if whitespace is not None: raise Exception("cannot set both whitespace and compress_whitespace") whitespace = "single" if compress_whitespace else "all" if whitespace is None: if loader and loader.whitespace: whitespace = loader.whitespace else: # Whitespace defaults by filename. if name.endswith(".html") or name.endswith(".js"): whitespace = "single" else: whitespace = "all" # Validate the whitespace setting. assert whitespace is not None filter_whitespace(whitespace, "") if not isinstance(autoescape, _UnsetMarker): self.autoescape = autoescape # type: Optional[str] elif loader: self.autoescape = loader.autoescape else: self.autoescape = _DEFAULT_AUTOESCAPE self.namespace = loader.namespace if loader else {} reader = _TemplateReader(name, escape.native_str(template_string), whitespace) self.file = _File(self, _parse(reader, self)) self.code = self._generate_python(loader) self.loader = loader try: # Under python2.5, the fake filename used here must match # the module name used in __name__ below. # The dont_inherit flag prevents template.py's future imports # from being applied to the generated code. self.compiled = compile( escape.to_unicode(self.code), "%s.generated.py" % self.name.replace(".", "_"), "exec", dont_inherit=True, ) except Exception: formatted_code = _format_code(self.code).rstrip() app_log.error("%s code:\n%s", self.name, formatted_code) raise def generate(self, **kwargs: Any) -> bytes: """Generate this template with the given arguments.""" namespace = { "escape": escape.xhtml_escape, "xhtml_escape": escape.xhtml_escape, "url_escape": escape.url_escape, "json_encode": escape.json_encode, "squeeze": escape.squeeze, "linkify": escape.linkify, "datetime": datetime, "_tt_utf8": escape.utf8, # for internal use "_tt_string_types": (unicode_type, bytes), # __name__ and __loader__ allow the traceback mechanism to find # the generated source code. "__name__": self.name.replace(".", "_"), "__loader__": ObjectDict(get_source=lambda name: self.code), } namespace.update(self.namespace) namespace.update(kwargs) exec_in(self.compiled, namespace) execute = typing.cast(Callable[[], bytes], namespace["_tt_execute"]) # Clear the traceback module's cache of source data now that # we've generated a new template (mainly for this module's # unittests, where different tests reuse the same name). 
linecache.clearcache() return execute() def _generate_python(self, loader: Optional["BaseLoader"]) -> str: buffer = StringIO() try: # named_blocks maps from names to _NamedBlock objects named_blocks = {} # type: Dict[str, _NamedBlock] ancestors = self._get_ancestors(loader) ancestors.reverse() for ancestor in ancestors: ancestor.find_named_blocks(loader, named_blocks) writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template) ancestors[0].generate(writer) return buffer.getvalue() finally: buffer.close() def _get_ancestors(self, loader: Optional["BaseLoader"]) -> List["_File"]: ancestors = [self.file] for chunk in self.file.body.chunks: if isinstance(chunk, _ExtendsBlock): if not loader: raise ParseError( "{% extends %} block found, but no " "template loader" ) template = loader.load(chunk.name, self.name) ancestors.extend(template._get_ancestors(loader)) return ancestors class BaseLoader(object): """Base class for template loaders. You must use a template loader to use template constructs like ``{% extends %}`` and ``{% include %}``. The loader caches all templates after they are loaded the first time. """ def __init__( self, autoescape: str = _DEFAULT_AUTOESCAPE, namespace: Optional[Dict[str, Any]] = None, whitespace: Optional[str] = None, ) -> None: """Construct a template loader. :arg str autoescape: The name of a function in the template namespace, such as "xhtml_escape", or ``None`` to disable autoescaping by default. :arg dict namespace: A dictionary to be added to the default template namespace, or ``None``. :arg str whitespace: A string specifying default behavior for whitespace in templates; see `filter_whitespace` for options. Default is "single" for files ending in ".html" and ".js" and "all" for other files. .. versionchanged:: 4.3 Added ``whitespace`` parameter. """ self.autoescape = autoescape self.namespace = namespace or {} self.whitespace = whitespace self.templates = {} # type: Dict[str, Template] # self.lock protects self.templates. It's a reentrant lock # because templates may load other templates via `include` or # `extends`. Note that thanks to the GIL this code would be safe # even without the lock, but could lead to wasted work as multiple # threads tried to compile the same template simultaneously. self.lock = threading.RLock() def reset(self) -> None: """Resets the cache of compiled templates.""" with self.lock: self.templates = {} def resolve_path(self, name: str, parent_path: Optional[str] = None) -> str: """Converts a possibly-relative path to absolute (used internally).""" raise NotImplementedError() def load(self, name: str, parent_path: Optional[str] = None) -> Template: """Loads a template.""" name = self.resolve_path(name, parent_path=parent_path) with self.lock: if name not in self.templates: self.templates[name] = self._create_template(name) return self.templates[name] def _create_template(self, name: str) -> Template: raise NotImplementedError() class Loader(BaseLoader): """A template loader that loads from a single root directory. 
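
    Relative paths in ``{% include %}`` and ``{% extends %}`` directives
    are resolved against the directory of the template that references
    them (see `resolve_path`).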
""" def __init__(self, root_directory: str, **kwargs: Any) -> None: super().__init__(**kwargs) self.root = os.path.abspath(root_directory) def resolve_path(self, name: str, parent_path: Optional[str] = None) -> str: if ( parent_path and not parent_path.startswith("<") and not parent_path.startswith("/") and not name.startswith("/") ): current_path = os.path.join(self.root, parent_path) file_dir = os.path.dirname(os.path.abspath(current_path)) relative_path = os.path.abspath(os.path.join(file_dir, name)) if relative_path.startswith(self.root): name = relative_path[len(self.root) + 1 :] return name def _create_template(self, name: str) -> Template: path = os.path.join(self.root, name) with open(path, "rb") as f: template = Template(f.read(), name=name, loader=self) return template class DictLoader(BaseLoader): """A template loader that loads from a dictionary.""" def __init__(self, dict: Dict[str, str], **kwargs: Any) -> None: super().__init__(**kwargs) self.dict = dict def resolve_path(self, name: str, parent_path: Optional[str] = None) -> str: if ( parent_path and not parent_path.startswith("<") and not parent_path.startswith("/") and not name.startswith("/") ): file_dir = posixpath.dirname(parent_path) name = posixpath.normpath(posixpath.join(file_dir, name)) return name def _create_template(self, name: str) -> Template: return Template(self.dict[name], name=name, loader=self) class _Node(object): def each_child(self) -> Iterable["_Node"]: return () def generate(self, writer: "_CodeWriter") -> None: raise NotImplementedError() def find_named_blocks( self, loader: Optional[BaseLoader], named_blocks: Dict[str, "_NamedBlock"] ) -> None: for child in self.each_child(): child.find_named_blocks(loader, named_blocks) class _File(_Node): def __init__(self, template: Template, body: "_ChunkList") -> None: self.template = template self.body = body self.line = 0 def generate(self, writer: "_CodeWriter") -> None: writer.write_line("def _tt_execute():", self.line) with writer.indent(): writer.write_line("_tt_buffer = []", self.line) writer.write_line("_tt_append = _tt_buffer.append", self.line) self.body.generate(writer) writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) def each_child(self) -> Iterable["_Node"]: return (self.body,) class _ChunkList(_Node): def __init__(self, chunks: List[_Node]) -> None: self.chunks = chunks def generate(self, writer: "_CodeWriter") -> None: for chunk in self.chunks: chunk.generate(writer) def each_child(self) -> Iterable["_Node"]: return self.chunks class _NamedBlock(_Node): def __init__(self, name: str, body: _Node, template: Template, line: int) -> None: self.name = name self.body = body self.template = template self.line = line def each_child(self) -> Iterable["_Node"]: return (self.body,) def generate(self, writer: "_CodeWriter") -> None: block = writer.named_blocks[self.name] with writer.include(block.template, self.line): block.body.generate(writer) def find_named_blocks( self, loader: Optional[BaseLoader], named_blocks: Dict[str, "_NamedBlock"] ) -> None: named_blocks[self.name] = self _Node.find_named_blocks(self, loader, named_blocks) class _ExtendsBlock(_Node): def __init__(self, name: str) -> None: self.name = name class _IncludeBlock(_Node): def __init__(self, name: str, reader: "_TemplateReader", line: int) -> None: self.name = name self.template_name = reader.name self.line = line def find_named_blocks( self, loader: Optional[BaseLoader], named_blocks: Dict[str, _NamedBlock] ) -> None: assert loader is not None included = 
loader.load(self.name, self.template_name) included.file.find_named_blocks(loader, named_blocks) def generate(self, writer: "_CodeWriter") -> None: assert writer.loader is not None included = writer.loader.load(self.name, self.template_name) with writer.include(included, self.line): included.file.body.generate(writer) class _ApplyBlock(_Node): def __init__(self, method: str, line: int, body: _Node) -> None: self.method = method self.line = line self.body = body def each_child(self) -> Iterable["_Node"]: return (self.body,) def generate(self, writer: "_CodeWriter") -> None: method_name = "_tt_apply%d" % writer.apply_counter writer.apply_counter += 1 writer.write_line("def %s():" % method_name, self.line) with writer.indent(): writer.write_line("_tt_buffer = []", self.line) writer.write_line("_tt_append = _tt_buffer.append", self.line) self.body.generate(writer) writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) writer.write_line( "_tt_append(_tt_utf8(%s(%s())))" % (self.method, method_name), self.line ) class _ControlBlock(_Node): def __init__(self, statement: str, line: int, body: _Node) -> None: self.statement = statement self.line = line self.body = body def each_child(self) -> Iterable[_Node]: return (self.body,) def generate(self, writer: "_CodeWriter") -> None: writer.write_line("%s:" % self.statement, self.line) with writer.indent(): self.body.generate(writer) # Just in case the body was empty writer.write_line("pass", self.line) class _IntermediateControlBlock(_Node): def __init__(self, statement: str, line: int) -> None: self.statement = statement self.line = line def generate(self, writer: "_CodeWriter") -> None: # In case the previous block was empty writer.write_line("pass", self.line) writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1) class _Statement(_Node): def __init__(self, statement: str, line: int) -> None: self.statement = statement self.line = line def generate(self, writer: "_CodeWriter") -> None: writer.write_line(self.statement, self.line) class _Expression(_Node): def __init__(self, expression: str, line: int, raw: bool = False) -> None: self.expression = expression self.line = line self.raw = raw def generate(self, writer: "_CodeWriter") -> None: writer.write_line("_tt_tmp = %s" % self.expression, self.line) writer.write_line( "if isinstance(_tt_tmp, _tt_string_types):" " _tt_tmp = _tt_utf8(_tt_tmp)", self.line, ) writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line) if not self.raw and writer.current_template.autoescape is not None: # In python3 functions like xhtml_escape return unicode, # so we have to convert to utf8 again. writer.write_line( "_tt_tmp = _tt_utf8(%s(_tt_tmp))" % writer.current_template.autoescape, self.line, ) writer.write_line("_tt_append(_tt_tmp)", self.line) class _Module(_Expression): def __init__(self, expression: str, line: int) -> None: super().__init__("_tt_modules." + expression, line, raw=True) class _Text(_Node): def __init__(self, value: str, line: int, whitespace: str) -> None: self.value = value self.line = line self.whitespace = whitespace def generate(self, writer: "_CodeWriter") -> None: value = self.value # Compress whitespace if requested, with a crude heuristic to avoid # altering preformatted whitespace. if "
    " not in value:
                value = filter_whitespace(self.whitespace, value)
    
            if value:
                writer.write_line("_tt_append(%r)" % escape.utf8(value), self.line)
    
    
    class ParseError(Exception):
        """Raised for template syntax errors.
    
        ``ParseError`` instances have ``filename`` and ``lineno`` attributes
        indicating the position of the error.
    
        .. versionchanged:: 4.3
           Added ``filename`` and ``lineno`` attributes.
        """
    
        def __init__(
            self, message: str, filename: Optional[str] = None, lineno: int = 0
        ) -> None:
            self.message = message
            # The names "filename" and "lineno" are chosen for consistency
            # with python SyntaxError.
            self.filename = filename
            self.lineno = lineno
    
        def __str__(self) -> str:
            return "%s at %s:%d" % (self.message, self.filename, self.lineno)
    
    
    class _CodeWriter(object):
        def __init__(
            self,
            file: TextIO,
            named_blocks: Dict[str, _NamedBlock],
            loader: Optional[BaseLoader],
            current_template: Template,
        ) -> None:
            self.file = file
            self.named_blocks = named_blocks
            self.loader = loader
            self.current_template = current_template
            self.apply_counter = 0
            self.include_stack = []  # type: List[Tuple[Template, int]]
            self._indent = 0
    
        def indent_size(self) -> int:
            return self._indent
    
        def indent(self) -> "ContextManager":
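            # ``_`` takes the place of ``self`` in the nested class below so
            # that its methods can close over the enclosing _CodeWriter.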
            class Indenter(object):
                def __enter__(_) -> "_CodeWriter":
                    self._indent += 1
                    return self
    
                def __exit__(_, *args: Any) -> None:
                    assert self._indent > 0
                    self._indent -= 1
    
            return Indenter()
    
        def include(self, template: Template, line: int) -> "ContextManager":
            self.include_stack.append((self.current_template, line))
            self.current_template = template
    
            class IncludeTemplate(object):
                def __enter__(_) -> "_CodeWriter":
                    return self
    
                def __exit__(_, *args: Any) -> None:
                    self.current_template = self.include_stack.pop()[0]
    
            return IncludeTemplate()
    
        def write_line(
            self, line: str, line_number: int, indent: Optional[int] = None
        ) -> None:
            if indent is None:
                indent = self._indent
            line_comment = "  # %s:%d" % (self.current_template.name, line_number)
            if self.include_stack:
                ancestors = [
                    "%s:%d" % (tmpl.name, lineno) for (tmpl, lineno) in self.include_stack
                ]
                line_comment += " (via %s)" % ", ".join(reversed(ancestors))
            print("    " * indent + line + line_comment, file=self.file)
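        # An illustrative example (file names are hypothetical): if root.html
        # line 2 includes child.html, a line generated for child.html line 3
        # is written as:
        #
        #     _tt_append(_tt_tmp)  # child.html:3 (via root.html:2)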
    
    
    class _TemplateReader(object):
        def __init__(self, name: str, text: str, whitespace: str) -> None:
            self.name = name
            self.text = text
            self.whitespace = whitespace
            self.line = 1
            self.pos = 0
    
        def find(self, needle: str, start: int = 0, end: Optional[int] = None) -> int:
            assert start >= 0, start
            pos = self.pos
            start += pos
            if end is None:
                index = self.text.find(needle, start)
            else:
                end += pos
                assert end >= start
                index = self.text.find(needle, start, end)
            if index != -1:
                index -= pos
            return index
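        # An illustrative example (not from the original source): with
        # text "abcdef" and pos == 2, find("d") returns 1; both the start
        # argument and the return value are relative to the current position.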
    
        def consume(self, count: Optional[int] = None) -> str:
            if count is None:
                count = len(self.text) - self.pos
            newpos = self.pos + count
            self.line += self.text.count("\n", self.pos, newpos)
            s = self.text[self.pos : newpos]
            self.pos = newpos
            return s
    
        def remaining(self) -> int:
            return len(self.text) - self.pos
    
        def __len__(self) -> int:
            return self.remaining()
    
        def __getitem__(self, key: Union[int, slice]) -> str:
            if isinstance(key, slice):
                size = len(self)
                start, stop, step = key.indices(size)
                if start is None:
                    start = self.pos
                else:
                    start += self.pos
                if stop is not None:
                    stop += self.pos
                return self.text[slice(start, stop, step)]
            elif key < 0:
                return self.text[key]
            else:
                return self.text[self.pos + key]
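        # An illustrative example (not from the original source): with
        # text "abcdef" and pos == 2, reader[0] == "c" and reader[1:3] == "de";
        # indexing is relative to the unconsumed remainder.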
    
        def __str__(self) -> str:
            return self.text[self.pos :]
    
        def raise_parse_error(self, msg: str) -> None:
            raise ParseError(msg, self.name, self.line)
    
    
    def _format_code(code: str) -> str:
        lines = code.splitlines()
        format = "%%%dd  %%s\n" % len(repr(len(lines) + 1))
        return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
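# An illustrative example (not from the original source): for a 99-line
# chunk of generated code, len(repr(100)) == 3, so the format string is
# "%3d  %s\n" and line 7 renders as "  7  _tt_append(_tt_tmp)".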
    
    
    def _parse(
        reader: _TemplateReader,
        template: Template,
        in_block: Optional[str] = None,
        in_loop: Optional[str] = None,
    ) -> _ChunkList:
        body = _ChunkList([])
        while True:
            # Find next template directive
            curly = 0
            while True:
                curly = reader.find("{", curly)
                if curly == -1 or curly + 1 == reader.remaining():
                    # EOF
                    if in_block:
                        reader.raise_parse_error(
                            "Missing {%% end %%} block for %s" % in_block
                        )
                    body.chunks.append(
                        _Text(reader.consume(), reader.line, reader.whitespace)
                    )
                    return body
                # If the first curly brace is not the start of a special token,
                # start searching from the character after it
                if reader[curly + 1] not in ("{", "%", "#"):
                    curly += 1
                    continue
                # When there are more than 2 curlies in a row, use the
                # innermost ones.  This is useful when generating languages
                # like latex where curlies are also meaningful
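            # For example (illustrative): in "{{{ x }}}" the scan skips past
            # the first brace, "{{ x }}" is parsed as the expression, and the
            # rendered output keeps a literal "{" and "}" around its value.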
                if (
                    curly + 2 < reader.remaining()
                    and reader[curly + 1] == "{"
                    and reader[curly + 2] == "{"
                ):
                    curly += 1
                    continue
                break
    
            # Append any text before the special token
            if curly > 0:
                cons = reader.consume(curly)
                body.chunks.append(_Text(cons, reader.line, reader.whitespace))
    
            start_brace = reader.consume(2)
            line = reader.line
    
            # Template directives may be escaped as "{{!" or "{%!".
            # In this case output the braces and consume the "!".
            # This is especially useful in conjunction with jquery templates,
            # which also use double braces.
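        # For example (illustrative): the template text "{{! x }}" renders
        # literally as "{{ x }}" rather than evaluating x.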
            if reader.remaining() and reader[0] == "!":
                reader.consume(1)
                body.chunks.append(_Text(start_brace, line, reader.whitespace))
                continue
    
            # Comment
            if start_brace == "{#":
                end = reader.find("#}")
                if end == -1:
                    reader.raise_parse_error("Missing end comment #}")
                contents = reader.consume(end).strip()
                reader.consume(2)
                continue
    
            # Expression
            if start_brace == "{{":
                end = reader.find("}}")
                if end == -1:
                    reader.raise_parse_error("Missing end expression }}")
                contents = reader.consume(end).strip()
                reader.consume(2)
                if not contents:
                    reader.raise_parse_error("Empty expression")
                body.chunks.append(_Expression(contents, line))
                continue
    
            # Block
            assert start_brace == "{%", start_brace
            end = reader.find("%}")
            if end == -1:
                reader.raise_parse_error("Missing end block %}")
            contents = reader.consume(end).strip()
            reader.consume(2)
            if not contents:
                reader.raise_parse_error("Empty block tag ({% %})")
    
            operator, space, suffix = contents.partition(" ")
            suffix = suffix.strip()
    
            # Intermediate ("else", "elif", etc) blocks
            intermediate_blocks = {
                "else": set(["if", "for", "while", "try"]),
                "elif": set(["if"]),
                "except": set(["try"]),
                "finally": set(["try"]),
            }
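        # For example (illustrative): "{% else %}" may appear inside an
        # "{% if %}", "{% for %}", "{% while %}", or "{% try %}" block,
        # while "{% elif %}" inside "{% for %}" raises a ParseError.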
            allowed_parents = intermediate_blocks.get(operator)
            if allowed_parents is not None:
                if not in_block:
                    reader.raise_parse_error(
                        "%s outside %s block" % (operator, allowed_parents)
                    )
                if in_block not in allowed_parents:
                    reader.raise_parse_error(
                        "%s block cannot be attached to %s block" % (operator, in_block)
                    )
                body.chunks.append(_IntermediateControlBlock(contents, line))
                continue
    
            # End tag
            elif operator == "end":
                if not in_block:
                    reader.raise_parse_error("Extra {% end %} block")
                return body
    
            elif operator in (
                "extends",
                "include",
                "set",
                "import",
                "from",
                "comment",
                "autoescape",
                "whitespace",
                "raw",
                "module",
            ):
                if operator == "comment":
                    continue
                if operator == "extends":
                    suffix = suffix.strip('"').strip("'")
                    if not suffix:
                        reader.raise_parse_error("extends missing file path")
                    block = _ExtendsBlock(suffix)  # type: _Node
                elif operator in ("import", "from"):
                    if not suffix:
                        reader.raise_parse_error("import missing statement")
                    block = _Statement(contents, line)
                elif operator == "include":
                    suffix = suffix.strip('"').strip("'")
                    if not suffix:
                        reader.raise_parse_error("include missing file path")
                    block = _IncludeBlock(suffix, reader, line)
                elif operator == "set":
                    if not suffix:
                        reader.raise_parse_error("set missing statement")
                    block = _Statement(suffix, line)
                elif operator == "autoescape":
                    fn = suffix.strip()  # type: Optional[str]
                    if fn == "None":
                        fn = None
                    template.autoescape = fn
                    continue
                elif operator == "whitespace":
                    mode = suffix.strip()
                    # Validate the selected mode
                    filter_whitespace(mode, "")
                    reader.whitespace = mode
                    continue
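            # For example (illustrative): "{% whitespace oneline %}" switches
            # the mode for the rest of the current file; any mode accepted by
            # filter_whitespace ("all", "single", "oneline") is valid.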
                elif operator == "raw":
                    block = _Expression(suffix, line, raw=True)
                elif operator == "module":
                    block = _Module(suffix, line)
                body.chunks.append(block)
                continue
    
            elif operator in ("apply", "block", "try", "if", "for", "while"):
                # parse inner body recursively
                if operator in ("for", "while"):
                    block_body = _parse(reader, template, operator, operator)
                elif operator == "apply":
                    # apply creates a nested function so syntactically it's not
                    # in the loop.
                    block_body = _parse(reader, template, operator, None)
                else:
                    block_body = _parse(reader, template, operator, in_loop)
    
                if operator == "apply":
                    if not suffix:
                        reader.raise_parse_error("apply missing method name")
                    block = _ApplyBlock(suffix, line, block_body)
                elif operator == "block":
                    if not suffix:
                        reader.raise_parse_error("block missing name")
                    block = _NamedBlock(suffix, block_body, template, line)
                else:
                    block = _ControlBlock(contents, line, block_body)
                body.chunks.append(block)
                continue
    
            elif operator in ("break", "continue"):
                if not in_loop:
                    reader.raise_parse_error(
                        "%s outside %s block" % (operator, set(["for", "while"]))
                    )
                body.chunks.append(_Statement(contents, line))
                continue
    
            else:
                reader.raise_parse_error("unknown operator: %r" % operator)
    tornado-6.1.0/tornado/test/000077500000000000000000000000001374705040500156165ustar00rootroot00000000000000tornado-6.1.0/tornado/test/__main__.py000066400000000000000000000005171374705040500177130ustar00rootroot00000000000000"""Shim to allow python -m tornado.test.
    
    This requires Python 2.7+; Tornado 6 itself already requires Python 3.5+.
    """
    from tornado.test.runtests import all, main
    
    # tornado.testing.main autodiscovery relies on 'all' being present in
    # the main module, so import it here even though it is not used directly.
    # The following line prevents a pyflakes warning.
    all = all
    
    main()
    tornado-6.1.0/tornado/test/asyncio_test.py000066400000000000000000000157631374705040500207100ustar00rootroot00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    
    import asyncio
    import unittest
    
    from concurrent.futures import ThreadPoolExecutor
    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.platform.asyncio import (
        AsyncIOLoop,
        to_asyncio_future,
        AnyThreadEventLoopPolicy,
    )
    from tornado.testing import AsyncTestCase, gen_test
    
    
    class AsyncIOLoopTest(AsyncTestCase):
        def get_new_ioloop(self):
            io_loop = AsyncIOLoop()
            return io_loop
    
        def test_asyncio_callback(self):
            # Basic test that the asyncio loop is set up correctly.
            asyncio.get_event_loop().call_soon(self.stop)
            self.wait()
    
        @gen_test
        def test_asyncio_future(self):
            # Test that we can yield an asyncio future from a tornado coroutine.
            # Without 'yield from', we must wrap coroutines in ensure_future,
            # which was introduced during Python 3.4, deprecating the prior "async".
            if hasattr(asyncio, "ensure_future"):
                ensure_future = asyncio.ensure_future
            else:
                # async is a reserved word in Python 3.7
                ensure_future = getattr(asyncio, "async")
    
            x = yield ensure_future(
                asyncio.get_event_loop().run_in_executor(None, lambda: 42)
            )
            self.assertEqual(x, 42)
    
        @gen_test
        def test_asyncio_yield_from(self):
            @gen.coroutine
            def f():
                event_loop = asyncio.get_event_loop()
                x = yield from event_loop.run_in_executor(None, lambda: 42)
                return x
    
            result = yield f()
            self.assertEqual(result, 42)
    
        def test_asyncio_adapter(self):
            # This test demonstrates that when using the asyncio coroutine
            # runner (i.e. run_until_complete), the to_asyncio_future
            # adapter is needed. No adapter is needed in the other direction,
            # as demonstrated by other tests in the package.
            @gen.coroutine
            def tornado_coroutine():
                yield gen.moment
                raise gen.Return(42)
    
            async def native_coroutine_without_adapter():
                return await tornado_coroutine()
    
            async def native_coroutine_with_adapter():
                return await to_asyncio_future(tornado_coroutine())
    
            # Use the adapter, but two degrees from the tornado coroutine.
            async def native_coroutine_with_adapter2():
                return await to_asyncio_future(native_coroutine_without_adapter())
    
            # Tornado supports native coroutines both with and without adapters
            self.assertEqual(self.io_loop.run_sync(native_coroutine_without_adapter), 42)
            self.assertEqual(self.io_loop.run_sync(native_coroutine_with_adapter), 42)
            self.assertEqual(self.io_loop.run_sync(native_coroutine_with_adapter2), 42)
    
            # Asyncio only supports coroutines that yield asyncio-compatible
            # Futures (which our Future is since 5.0).
            self.assertEqual(
                asyncio.get_event_loop().run_until_complete(
                    native_coroutine_without_adapter()
                ),
                42,
            )
            self.assertEqual(
                asyncio.get_event_loop().run_until_complete(
                    native_coroutine_with_adapter()
                ),
                42,
            )
            self.assertEqual(
                asyncio.get_event_loop().run_until_complete(
                    native_coroutine_with_adapter2()
                ),
                42,
            )
    
    
    class LeakTest(unittest.TestCase):
        def setUp(self):
            # Trigger a cleanup of the mapping so we start with a clean slate.
            AsyncIOLoop().close()
        # If we don't clean up after ourselves, other tests may fail on
        # py34.
            self.orig_policy = asyncio.get_event_loop_policy()
            asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
    
        def tearDown(self):
            asyncio.get_event_loop().close()
            asyncio.set_event_loop_policy(self.orig_policy)
    
        def test_ioloop_close_leak(self):
            orig_count = len(IOLoop._ioloop_for_asyncio)
            for i in range(10):
                # Create and close an AsyncIOLoop using Tornado interfaces.
                loop = AsyncIOLoop()
                loop.close()
            new_count = len(IOLoop._ioloop_for_asyncio) - orig_count
            self.assertEqual(new_count, 0)
    
        def test_asyncio_close_leak(self):
            orig_count = len(IOLoop._ioloop_for_asyncio)
            for i in range(10):
                # Create and close an AsyncIOMainLoop using asyncio interfaces.
                loop = asyncio.new_event_loop()
                loop.call_soon(IOLoop.current)
                loop.call_soon(loop.stop)
                loop.run_forever()
                loop.close()
            new_count = len(IOLoop._ioloop_for_asyncio) - orig_count
            # Because the cleanup is run on new loop creation, we have one
            # dangling entry in the map (but only one).
            self.assertEqual(new_count, 1)
    
    
    class AnyThreadEventLoopPolicyTest(unittest.TestCase):
        def setUp(self):
            self.orig_policy = asyncio.get_event_loop_policy()
            self.executor = ThreadPoolExecutor(1)
    
        def tearDown(self):
            asyncio.set_event_loop_policy(self.orig_policy)
            self.executor.shutdown()
    
        def get_event_loop_on_thread(self):
            def get_and_close_event_loop():
                """Get the event loop. Close it if one is returned.
    
                Returns the (closed) event loop. This is a silly thing
                to do and leaves the thread in a broken state, but it's
                enough for this test. Closing the loop avoids resource
                leak warnings.
                """
                loop = asyncio.get_event_loop()
                loop.close()
                return loop
    
            future = self.executor.submit(get_and_close_event_loop)
            return future.result()
    
        def run_policy_test(self, accessor, expected_type):
            # With the default policy, non-main threads don't get an event
            # loop.
            self.assertRaises(
                (RuntimeError, AssertionError), self.executor.submit(accessor).result
            )
            # Set the policy and we can get a loop.
            asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
            self.assertIsInstance(self.executor.submit(accessor).result(), expected_type)
            # Clean up to silence leak warnings. Always use asyncio since
            # IOLoop doesn't (currently) close the underlying loop.
            self.executor.submit(lambda: asyncio.get_event_loop().close()).result()  # type: ignore
    
        def test_asyncio_accessor(self):
            self.run_policy_test(asyncio.get_event_loop, asyncio.AbstractEventLoop)
    
        def test_tornado_accessor(self):
            self.run_policy_test(IOLoop.current, IOLoop)
    tornado-6.1.0/tornado/test/auth_test.py000066400000000000000000000556151374705040500202040ustar00rootroot00000000000000# These tests do not currently do much to verify the correct implementation
# of the openid/oauth protocols; they just exercise the major code paths
    # and ensure that it doesn't blow up (e.g. with unicode/bytes issues in
    # python 3)
    
    import unittest
    
    from tornado.auth import (
        OpenIdMixin,
        OAuthMixin,
        OAuth2Mixin,
        GoogleOAuth2Mixin,
        FacebookGraphMixin,
        TwitterMixin,
    )
    from tornado.escape import json_decode
    from tornado import gen
    from tornado.httpclient import HTTPClientError
    from tornado.httputil import url_concat
    from tornado.log import app_log
    from tornado.testing import AsyncHTTPTestCase, ExpectLog
    from tornado.web import RequestHandler, Application, HTTPError
    
    try:
        from unittest import mock
    except ImportError:
        mock = None  # type: ignore
    
    
    class OpenIdClientLoginHandler(RequestHandler, OpenIdMixin):
        def initialize(self, test):
            self._OPENID_ENDPOINT = test.get_url("/openid/server/authenticate")
    
        @gen.coroutine
        def get(self):
            if self.get_argument("openid.mode", None):
                user = yield self.get_authenticated_user(
                    http_client=self.settings["http_client"]
                )
                if user is None:
                    raise Exception("user is None")
                self.finish(user)
                return
            res = self.authenticate_redirect()  # type: ignore
            assert res is None
    
    
    class OpenIdServerAuthenticateHandler(RequestHandler):
        def post(self):
            if self.get_argument("openid.mode") != "check_authentication":
            raise Exception(
                "incorrect openid.mode %r" % self.get_argument("openid.mode")
            )
            self.write("is_valid:true")
    
    
    class OAuth1ClientLoginHandler(RequestHandler, OAuthMixin):
        def initialize(self, test, version):
            self._OAUTH_VERSION = version
            self._OAUTH_REQUEST_TOKEN_URL = test.get_url("/oauth1/server/request_token")
            self._OAUTH_AUTHORIZE_URL = test.get_url("/oauth1/server/authorize")
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url("/oauth1/server/access_token")
    
        def _oauth_consumer_token(self):
            return dict(key="asdf", secret="qwer")
    
        @gen.coroutine
        def get(self):
            if self.get_argument("oauth_token", None):
                user = yield self.get_authenticated_user(
                    http_client=self.settings["http_client"]
                )
                if user is None:
                    raise Exception("user is None")
                self.finish(user)
                return
            yield self.authorize_redirect(http_client=self.settings["http_client"])
    
        @gen.coroutine
        def _oauth_get_user_future(self, access_token):
            if self.get_argument("fail_in_get_user", None):
                raise Exception("failing in get_user")
            if access_token != dict(key="uiop", secret="5678"):
                raise Exception("incorrect access token %r" % access_token)
            return dict(email="foo@example.com")
    
    
class OAuth1ClientLoginCoroutineHandler(OAuth1ClientLoginHandler):
    """Replaces OAuth1ClientLoginHandler's get() with a coroutine."""
    
        @gen.coroutine
        def get(self):
            if self.get_argument("oauth_token", None):
                # Ensure that any exceptions are set on the returned Future,
                # not simply thrown into the surrounding StackContext.
                try:
                    yield self.get_authenticated_user()
                except Exception as e:
                    self.set_status(503)
                    self.write("got exception: %s" % e)
            else:
                yield self.authorize_redirect()
    
    
    class OAuth1ClientRequestParametersHandler(RequestHandler, OAuthMixin):
        def initialize(self, version):
            self._OAUTH_VERSION = version
    
        def _oauth_consumer_token(self):
            return dict(key="asdf", secret="qwer")
    
        def get(self):
            params = self._oauth_request_parameters(
                "http://www.example.com/api/asdf",
                dict(key="uiop", secret="5678"),
                parameters=dict(foo="bar"),
            )
            self.write(params)
    
    
    class OAuth1ServerRequestTokenHandler(RequestHandler):
        def get(self):
            self.write("oauth_token=zxcv&oauth_token_secret=1234")
    
    
    class OAuth1ServerAccessTokenHandler(RequestHandler):
        def get(self):
            self.write("oauth_token=uiop&oauth_token_secret=5678")
    
    
    class OAuth2ClientLoginHandler(RequestHandler, OAuth2Mixin):
        def initialize(self, test):
            self._OAUTH_AUTHORIZE_URL = test.get_url("/oauth2/server/authorize")
    
        def get(self):
            res = self.authorize_redirect()  # type: ignore
            assert res is None
    
    
    class FacebookClientLoginHandler(RequestHandler, FacebookGraphMixin):
        def initialize(self, test):
            self._OAUTH_AUTHORIZE_URL = test.get_url("/facebook/server/authorize")
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url("/facebook/server/access_token")
            self._FACEBOOK_BASE_URL = test.get_url("/facebook/server")
    
        @gen.coroutine
        def get(self):
            if self.get_argument("code", None):
                user = yield self.get_authenticated_user(
                    redirect_uri=self.request.full_url(),
                    client_id=self.settings["facebook_api_key"],
                    client_secret=self.settings["facebook_secret"],
                    code=self.get_argument("code"),
                )
                self.write(user)
            else:
                self.authorize_redirect(
                    redirect_uri=self.request.full_url(),
                    client_id=self.settings["facebook_api_key"],
                    extra_params={"scope": "read_stream,offline_access"},
                )
    
    
    class FacebookServerAccessTokenHandler(RequestHandler):
        def get(self):
            self.write(dict(access_token="asdf", expires_in=3600))
    
    
    class FacebookServerMeHandler(RequestHandler):
        def get(self):
            self.write("{}")
    
    
    class TwitterClientHandler(RequestHandler, TwitterMixin):
        def initialize(self, test):
            self._OAUTH_REQUEST_TOKEN_URL = test.get_url("/oauth1/server/request_token")
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url("/twitter/server/access_token")
            self._OAUTH_AUTHORIZE_URL = test.get_url("/oauth1/server/authorize")
            self._OAUTH_AUTHENTICATE_URL = test.get_url("/twitter/server/authenticate")
            self._TWITTER_BASE_URL = test.get_url("/twitter/api")
    
        def get_auth_http_client(self):
            return self.settings["http_client"]
    
    
    class TwitterClientLoginHandler(TwitterClientHandler):
        @gen.coroutine
        def get(self):
            if self.get_argument("oauth_token", None):
                user = yield self.get_authenticated_user()
                if user is None:
                    raise Exception("user is None")
                self.finish(user)
                return
            yield self.authorize_redirect()
    
    
    class TwitterClientAuthenticateHandler(TwitterClientHandler):
        # Like TwitterClientLoginHandler, but uses authenticate_redirect
        # instead of authorize_redirect.
        @gen.coroutine
        def get(self):
            if self.get_argument("oauth_token", None):
                user = yield self.get_authenticated_user()
                if user is None:
                    raise Exception("user is None")
                self.finish(user)
                return
            yield self.authenticate_redirect()
    
    
    class TwitterClientLoginGenCoroutineHandler(TwitterClientHandler):
        @gen.coroutine
        def get(self):
            if self.get_argument("oauth_token", None):
                user = yield self.get_authenticated_user()
                self.finish(user)
            else:
                # New style: with @gen.coroutine the result must be yielded
                # or else the request will be auto-finished too soon.
                yield self.authorize_redirect()
    
    
    class TwitterClientShowUserHandler(TwitterClientHandler):
        @gen.coroutine
        def get(self):
            # TODO: would be nice to go through the login flow instead of
            # cheating with a hard-coded access token.
            try:
                response = yield self.twitter_request(
                    "/users/show/%s" % self.get_argument("name"),
                    access_token=dict(key="hjkl", secret="vbnm"),
                )
            except HTTPClientError:
                # TODO(bdarnell): Should we catch HTTP errors and
                # transform some of them (like 403s) into AuthError?
                self.set_status(500)
                self.finish("error from twitter request")
            else:
                self.finish(response)
    
    
    class TwitterServerAccessTokenHandler(RequestHandler):
        def get(self):
            self.write("oauth_token=hjkl&oauth_token_secret=vbnm&screen_name=foo")
    
    
    class TwitterServerShowUserHandler(RequestHandler):
        def get(self, screen_name):
            if screen_name == "error":
                raise HTTPError(500)
            assert "oauth_nonce" in self.request.arguments
            assert "oauth_timestamp" in self.request.arguments
            assert "oauth_signature" in self.request.arguments
            assert self.get_argument("oauth_consumer_key") == "test_twitter_consumer_key"
            assert self.get_argument("oauth_signature_method") == "HMAC-SHA1"
            assert self.get_argument("oauth_version") == "1.0"
            assert self.get_argument("oauth_token") == "hjkl"
            self.write(dict(screen_name=screen_name, name=screen_name.capitalize()))
    
    
    class TwitterServerVerifyCredentialsHandler(RequestHandler):
        def get(self):
            assert "oauth_nonce" in self.request.arguments
            assert "oauth_timestamp" in self.request.arguments
            assert "oauth_signature" in self.request.arguments
            assert self.get_argument("oauth_consumer_key") == "test_twitter_consumer_key"
            assert self.get_argument("oauth_signature_method") == "HMAC-SHA1"
            assert self.get_argument("oauth_version") == "1.0"
            assert self.get_argument("oauth_token") == "hjkl"
            self.write(dict(screen_name="foo", name="Foo"))
    
    
    class AuthTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application(
                [
                    # test endpoints
                    ("/openid/client/login", OpenIdClientLoginHandler, dict(test=self)),
                    (
                        "/oauth10/client/login",
                        OAuth1ClientLoginHandler,
                        dict(test=self, version="1.0"),
                    ),
                    (
                        "/oauth10/client/request_params",
                        OAuth1ClientRequestParametersHandler,
                        dict(version="1.0"),
                    ),
                    (
                        "/oauth10a/client/login",
                        OAuth1ClientLoginHandler,
                        dict(test=self, version="1.0a"),
                    ),
                    (
                        "/oauth10a/client/login_coroutine",
                        OAuth1ClientLoginCoroutineHandler,
                        dict(test=self, version="1.0a"),
                    ),
                    (
                        "/oauth10a/client/request_params",
                        OAuth1ClientRequestParametersHandler,
                        dict(version="1.0a"),
                    ),
                    ("/oauth2/client/login", OAuth2ClientLoginHandler, dict(test=self)),
                    ("/facebook/client/login", FacebookClientLoginHandler, dict(test=self)),
                    ("/twitter/client/login", TwitterClientLoginHandler, dict(test=self)),
                    (
                        "/twitter/client/authenticate",
                        TwitterClientAuthenticateHandler,
                        dict(test=self),
                    ),
                    (
                        "/twitter/client/login_gen_coroutine",
                        TwitterClientLoginGenCoroutineHandler,
                        dict(test=self),
                    ),
                    (
                        "/twitter/client/show_user",
                        TwitterClientShowUserHandler,
                        dict(test=self),
                    ),
                    # simulated servers
                    ("/openid/server/authenticate", OpenIdServerAuthenticateHandler),
                    ("/oauth1/server/request_token", OAuth1ServerRequestTokenHandler),
                    ("/oauth1/server/access_token", OAuth1ServerAccessTokenHandler),
                    ("/facebook/server/access_token", FacebookServerAccessTokenHandler),
                    ("/facebook/server/me", FacebookServerMeHandler),
                    ("/twitter/server/access_token", TwitterServerAccessTokenHandler),
                    (r"/twitter/api/users/show/(.*)\.json", TwitterServerShowUserHandler),
                    (
                        r"/twitter/api/account/verify_credentials\.json",
                        TwitterServerVerifyCredentialsHandler,
                    ),
                ],
                http_client=self.http_client,
                twitter_consumer_key="test_twitter_consumer_key",
                twitter_consumer_secret="test_twitter_consumer_secret",
                facebook_api_key="test_facebook_api_key",
                facebook_secret="test_facebook_secret",
            )
    
        def test_openid_redirect(self):
            response = self.fetch("/openid/client/login", follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue("/openid/server/authenticate?" in response.headers["Location"])
    
        def test_openid_get_user(self):
            response = self.fetch(
                "/openid/client/login?openid.mode=blah"
                "&openid.ns.ax=http://openid.net/srv/ax/1.0"
                "&openid.ax.type.email=http://axschema.org/contact/email"
                "&openid.ax.value.email=foo@example.com"
            )
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed["email"], "foo@example.com")
    
        def test_oauth10_redirect(self):
            response = self.fetch("/oauth10/client/login", follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(
                response.headers["Location"].endswith(
                    "/oauth1/server/authorize?oauth_token=zxcv"
                )
            )
            # the cookie is base64('zxcv')|base64('1234')
            self.assertTrue(
                '_oauth_request_token="enhjdg==|MTIzNA=="'
                in response.headers["Set-Cookie"],
                response.headers["Set-Cookie"],
            )
    
        def test_oauth10_get_user(self):
            response = self.fetch(
                "/oauth10/client/login?oauth_token=zxcv",
                headers={"Cookie": "_oauth_request_token=enhjdg==|MTIzNA=="},
            )
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed["email"], "foo@example.com")
            self.assertEqual(parsed["access_token"], dict(key="uiop", secret="5678"))
    
        def test_oauth10_request_parameters(self):
            response = self.fetch("/oauth10/client/request_params")
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed["oauth_consumer_key"], "asdf")
            self.assertEqual(parsed["oauth_token"], "uiop")
            self.assertTrue("oauth_nonce" in parsed)
            self.assertTrue("oauth_signature" in parsed)
    
        def test_oauth10a_redirect(self):
            response = self.fetch("/oauth10a/client/login", follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(
                response.headers["Location"].endswith(
                    "/oauth1/server/authorize?oauth_token=zxcv"
                )
            )
            # the cookie is base64('zxcv')|base64('1234')
            self.assertTrue(
                '_oauth_request_token="enhjdg==|MTIzNA=="'
                in response.headers["Set-Cookie"],
                response.headers["Set-Cookie"],
            )
    
        @unittest.skipIf(mock is None, "mock package not present")
        def test_oauth10a_redirect_error(self):
            with mock.patch.object(OAuth1ServerRequestTokenHandler, "get") as get:
                get.side_effect = Exception("boom")
                with ExpectLog(app_log, "Uncaught exception"):
                    response = self.fetch("/oauth10a/client/login", follow_redirects=False)
                self.assertEqual(response.code, 500)
    
        def test_oauth10a_get_user(self):
            response = self.fetch(
                "/oauth10a/client/login?oauth_token=zxcv",
                headers={"Cookie": "_oauth_request_token=enhjdg==|MTIzNA=="},
            )
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed["email"], "foo@example.com")
            self.assertEqual(parsed["access_token"], dict(key="uiop", secret="5678"))
    
        def test_oauth10a_request_parameters(self):
            response = self.fetch("/oauth10a/client/request_params")
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed["oauth_consumer_key"], "asdf")
            self.assertEqual(parsed["oauth_token"], "uiop")
            self.assertTrue("oauth_nonce" in parsed)
            self.assertTrue("oauth_signature" in parsed)
    
        def test_oauth10a_get_user_coroutine_exception(self):
            response = self.fetch(
                "/oauth10a/client/login_coroutine?oauth_token=zxcv&fail_in_get_user=true",
                headers={"Cookie": "_oauth_request_token=enhjdg==|MTIzNA=="},
            )
            self.assertEqual(response.code, 503)
    
        def test_oauth2_redirect(self):
            response = self.fetch("/oauth2/client/login", follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue("/oauth2/server/authorize?" in response.headers["Location"])
    
        def test_facebook_login(self):
            response = self.fetch("/facebook/client/login", follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue("/facebook/server/authorize?" in response.headers["Location"])
            response = self.fetch(
                "/facebook/client/login?code=1234", follow_redirects=False
            )
            self.assertEqual(response.code, 200)
            user = json_decode(response.body)
            self.assertEqual(user["access_token"], "asdf")
            self.assertEqual(user["session_expires"], "3600")
    
        def base_twitter_redirect(self, url):
            # Same as test_oauth10a_redirect
            response = self.fetch(url, follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(
                response.headers["Location"].endswith(
                    "/oauth1/server/authorize?oauth_token=zxcv"
                )
            )
            # the cookie is base64('zxcv')|base64('1234')
            self.assertTrue(
                '_oauth_request_token="enhjdg==|MTIzNA=="'
                in response.headers["Set-Cookie"],
                response.headers["Set-Cookie"],
            )
    
        def test_twitter_redirect(self):
            self.base_twitter_redirect("/twitter/client/login")
    
        def test_twitter_redirect_gen_coroutine(self):
            self.base_twitter_redirect("/twitter/client/login_gen_coroutine")
    
        def test_twitter_authenticate_redirect(self):
            response = self.fetch("/twitter/client/authenticate", follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(
                response.headers["Location"].endswith(
                    "/twitter/server/authenticate?oauth_token=zxcv"
                ),
                response.headers["Location"],
            )
            # the cookie is base64('zxcv')|base64('1234')
            self.assertTrue(
                '_oauth_request_token="enhjdg==|MTIzNA=="'
                in response.headers["Set-Cookie"],
                response.headers["Set-Cookie"],
            )
    
        def test_twitter_get_user(self):
            response = self.fetch(
                "/twitter/client/login?oauth_token=zxcv",
                headers={"Cookie": "_oauth_request_token=enhjdg==|MTIzNA=="},
            )
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(
                parsed,
                {
                    u"access_token": {
                        u"key": u"hjkl",
                        u"screen_name": u"foo",
                        u"secret": u"vbnm",
                    },
                    u"name": u"Foo",
                    u"screen_name": u"foo",
                    u"username": u"foo",
                },
            )
    
        def test_twitter_show_user(self):
            response = self.fetch("/twitter/client/show_user?name=somebody")
            response.rethrow()
            self.assertEqual(
                json_decode(response.body), {"name": "Somebody", "screen_name": "somebody"}
            )
    
        def test_twitter_show_user_error(self):
            response = self.fetch("/twitter/client/show_user?name=error")
            self.assertEqual(response.code, 500)
            self.assertEqual(response.body, b"error from twitter request")
    
    
    class GoogleLoginHandler(RequestHandler, GoogleOAuth2Mixin):
        def initialize(self, test):
            self.test = test
            self._OAUTH_REDIRECT_URI = test.get_url("/client/login")
            self._OAUTH_AUTHORIZE_URL = test.get_url("/google/oauth2/authorize")
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url("/google/oauth2/token")
    
        @gen.coroutine
        def get(self):
            code = self.get_argument("code", None)
            if code is not None:
            # retrieve the authenticated Google user
                access = yield self.get_authenticated_user(self._OAUTH_REDIRECT_URI, code)
                user = yield self.oauth2_request(
                    self.test.get_url("/google/oauth2/userinfo"),
                    access_token=access["access_token"],
                )
                # return the user and access token as json
                user["access_token"] = access["access_token"]
                self.write(user)
            else:
                self.authorize_redirect(
                    redirect_uri=self._OAUTH_REDIRECT_URI,
                    client_id=self.settings["google_oauth"]["key"],
                    client_secret=self.settings["google_oauth"]["secret"],
                    scope=["profile", "email"],
                    response_type="code",
                    extra_params={"prompt": "select_account"},
                )
    
    
    class GoogleOAuth2AuthorizeHandler(RequestHandler):
        def get(self):
            # issue a fake auth code and redirect to redirect_uri
            code = "fake-authorization-code"
            self.redirect(url_concat(self.get_argument("redirect_uri"), dict(code=code)))
    
    
    class GoogleOAuth2TokenHandler(RequestHandler):
        def post(self):
            assert self.get_argument("code") == "fake-authorization-code"
            # issue a fake token
            self.finish(
                {"access_token": "fake-access-token", "expires_in": "never-expires"}
            )
    
    
    class GoogleOAuth2UserinfoHandler(RequestHandler):
        def get(self):
            assert self.get_argument("access_token") == "fake-access-token"
            # return a fake user
            self.finish({"name": "Foo", "email": "foo@example.com"})
    
    
    class GoogleOAuth2Test(AsyncHTTPTestCase):
        def get_app(self):
            return Application(
                [
                    # test endpoints
                    ("/client/login", GoogleLoginHandler, dict(test=self)),
                    # simulated google authorization server endpoints
                    ("/google/oauth2/authorize", GoogleOAuth2AuthorizeHandler),
                    ("/google/oauth2/token", GoogleOAuth2TokenHandler),
                    ("/google/oauth2/userinfo", GoogleOAuth2UserinfoHandler),
                ],
                google_oauth={
                    "key": "fake_google_client_id",
                    "secret": "fake_google_client_secret",
                },
            )
    
        def test_google_login(self):
            response = self.fetch("/client/login")
            self.assertDictEqual(
                {
                    u"name": u"Foo",
                    u"email": u"foo@example.com",
                    u"access_token": u"fake-access-token",
                },
                json_decode(response.body),
            )
    tornado-6.1.0/tornado/test/autoreload_test.py000066400000000000000000000075541374705040500214010ustar00rootroot00000000000000import os
    import shutil
    import subprocess
    from subprocess import Popen
    import sys
    from tempfile import mkdtemp
    import time
    import unittest
    
    
    class AutoreloadTest(unittest.TestCase):
        def setUp(self):
            self.path = mkdtemp()
    
        def tearDown(self):
            try:
                shutil.rmtree(self.path)
            except OSError:
                # Windows disallows deleting files that are in use by
                # another process, and even though we've waited for our
                # child process below, it appears that its lock on these
                # files is not guaranteed to be released by this point.
                # Sleep and try again (once).
                time.sleep(1)
                shutil.rmtree(self.path)
    
        def test_reload_module(self):
            main = """\
    import os
    import sys
    
    from tornado import autoreload
    
    # This import will fail if path is not set up correctly
    import testapp
    
    print('Starting')
    if 'TESTAPP_STARTED' not in os.environ:
        os.environ['TESTAPP_STARTED'] = '1'
        sys.stdout.flush()
        autoreload._reload()
    """
    
            # Create temporary test application
            os.mkdir(os.path.join(self.path, "testapp"))
            open(os.path.join(self.path, "testapp/__init__.py"), "w").close()
            with open(os.path.join(self.path, "testapp/__main__.py"), "w") as f:
                f.write(main)
    
            # Make sure the tornado module under test is available to the test
            # application
            pythonpath = os.getcwd()
            if "PYTHONPATH" in os.environ:
                pythonpath += os.pathsep + os.environ["PYTHONPATH"]
    
            p = Popen(
                [sys.executable, "-m", "testapp"],
                stdout=subprocess.PIPE,
                cwd=self.path,
                env=dict(os.environ, PYTHONPATH=pythonpath),
                universal_newlines=True,
            )
            out = p.communicate()[0]
            self.assertEqual(out, "Starting\nStarting\n")
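        # "Starting" appears twice: once from the initial run and once after
        # autoreload._reload() re-executes the test application.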
    
        def test_reload_wrapper_preservation(self):
            # This test verifies that when `python -m tornado.autoreload`
            # is used on an application that also has an internal
            # autoreload, the reload wrapper is preserved on restart.
            main = """\
    import os
    import sys
    
    # This import will fail if path is not set up correctly
    import testapp
    
    if 'tornado.autoreload' not in sys.modules:
        raise Exception('started without autoreload wrapper')
    
    import tornado.autoreload
    
    print('Starting')
    sys.stdout.flush()
    if 'TESTAPP_STARTED' not in os.environ:
        os.environ['TESTAPP_STARTED'] = '1'
        # Simulate an internal autoreload (one not caused
        # by the wrapper).
        tornado.autoreload._reload()
    else:
        # Exit directly so autoreload doesn't catch it.
        os._exit(0)
    """
    
            # Create temporary test application
            os.mkdir(os.path.join(self.path, "testapp"))
            init_file = os.path.join(self.path, "testapp", "__init__.py")
            open(init_file, "w").close()
            main_file = os.path.join(self.path, "testapp", "__main__.py")
            with open(main_file, "w") as f:
                f.write(main)
    
            # Make sure the tornado module under test is available to the test
            # application
            pythonpath = os.getcwd()
            if "PYTHONPATH" in os.environ:
                pythonpath += os.pathsep + os.environ["PYTHONPATH"]
    
            autoreload_proc = Popen(
                [sys.executable, "-m", "tornado.autoreload", "-m", "testapp"],
                stdout=subprocess.PIPE,
                cwd=self.path,
                env=dict(os.environ, PYTHONPATH=pythonpath),
                universal_newlines=True,
            )
    
            # This timeout needs to be fairly generous for pypy due to jit
            # warmup costs.
            for i in range(40):
                if autoreload_proc.poll() is not None:
                    break
                time.sleep(0.1)
            else:
                autoreload_proc.kill()
                raise Exception("subprocess failed to terminate")
    
            out = autoreload_proc.communicate()[0]
            self.assertEqual(out, "Starting\n" * 2)
    tornado-6.1.0/tornado/test/concurrent_test.py000066400000000000000000000136431374705040500214200ustar00rootroot00000000000000#
    # Copyright 2012 Facebook
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    from concurrent import futures
    import logging
    import re
    import socket
    import typing
    import unittest
    
    from tornado.concurrent import (
        Future,
        run_on_executor,
        future_set_result_unless_cancelled,
    )
    from tornado.escape import utf8, to_unicode
    from tornado import gen
    from tornado.iostream import IOStream
    from tornado.tcpserver import TCPServer
    from tornado.testing import AsyncTestCase, bind_unused_port, gen_test
    
    
    class MiscFutureTest(AsyncTestCase):
        def test_future_set_result_unless_cancelled(self):
            fut = Future()  # type: Future[int]
            future_set_result_unless_cancelled(fut, 42)
            self.assertEqual(fut.result(), 42)
            self.assertFalse(fut.cancelled())
    
            fut = Future()
            fut.cancel()
            is_cancelled = fut.cancelled()
            future_set_result_unless_cancelled(fut, 42)
            self.assertEqual(fut.cancelled(), is_cancelled)
            if not is_cancelled:
                self.assertEqual(fut.result(), 42)
    
    
    # The following series of classes demonstrate and test various styles
    # of use, with and without generators and futures.
    
    
    class CapServer(TCPServer):
        @gen.coroutine
        def handle_stream(self, stream, address):
            data = yield stream.read_until(b"\n")
            data = to_unicode(data)
            if data == data.upper():
                stream.write(b"error\talready capitalized\n")
            else:
                # data already has \n
                stream.write(utf8("ok\t%s" % data.upper()))
            stream.close()
    
    
    class CapError(Exception):
        pass
    
    
    class BaseCapClient(object):
        def __init__(self, port):
            self.port = port
    
        def process_response(self, data):
            m = re.match("(.*)\t(.*)\n", to_unicode(data))
            if m is None:
                raise Exception("did not match")
            status, message = m.groups()
            if status == "ok":
                return message
            else:
                raise CapError(message)
    
    
    class GeneratorCapClient(BaseCapClient):
        @gen.coroutine
        def capitalize(self, request_data):
            logging.debug("capitalize")
            stream = IOStream(socket.socket())
            logging.debug("connecting")
            yield stream.connect(("127.0.0.1", self.port))
            stream.write(utf8(request_data + "\n"))
            logging.debug("reading")
            data = yield stream.read_until(b"\n")
            logging.debug("returning")
            stream.close()
            raise gen.Return(self.process_response(data))
    
    
    class ClientTestMixin(object):
        client_class = None  # type: typing.Callable
    
        def setUp(self):
            super().setUp()  # type: ignore
            self.server = CapServer()
            sock, port = bind_unused_port()
            self.server.add_sockets([sock])
            self.client = self.client_class(port=port)
    
        def tearDown(self):
            self.server.stop()
            super().tearDown()  # type: ignore
    
        def test_future(self: typing.Any):
            future = self.client.capitalize("hello")
            self.io_loop.add_future(future, self.stop)
            self.wait()
            self.assertEqual(future.result(), "HELLO")
    
        def test_future_error(self: typing.Any):
            future = self.client.capitalize("HELLO")
            self.io_loop.add_future(future, self.stop)
            self.wait()
            self.assertRaisesRegexp(CapError, "already capitalized", future.result)  # type: ignore
    
        def test_generator(self: typing.Any):
            @gen.coroutine
            def f():
                result = yield self.client.capitalize("hello")
                self.assertEqual(result, "HELLO")
    
            self.io_loop.run_sync(f)
    
        def test_generator_error(self: typing.Any):
            @gen.coroutine
            def f():
                with self.assertRaisesRegexp(CapError, "already capitalized"):
                    yield self.client.capitalize("HELLO")
    
            self.io_loop.run_sync(f)
    
    
    class GeneratorClientTest(ClientTestMixin, AsyncTestCase):
        client_class = GeneratorCapClient
    
    
    class RunOnExecutorTest(AsyncTestCase):
        @gen_test
        def test_no_calling(self):
            class Object(object):
                def __init__(self):
                    self.executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor
                def f(self):
                    return 42
    
            o = Object()
            answer = yield o.f()
            self.assertEqual(answer, 42)
    
        @gen_test
        def test_call_with_no_args(self):
            class Object(object):
                def __init__(self):
                    self.executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor()
                def f(self):
                    return 42
    
            o = Object()
            answer = yield o.f()
            self.assertEqual(answer, 42)
    
        @gen_test
        def test_call_with_executor(self):
            class Object(object):
                def __init__(self):
                    self.__executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor(executor="_Object__executor")
                def f(self):
                    return 42
    
            o = Object()
            answer = yield o.f()
            self.assertEqual(answer, 42)
    
        @gen_test
        def test_async_await(self):
            class Object(object):
                def __init__(self):
                    self.executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor()
                def f(self):
                    return 42
    
            o = Object()
    
            async def f():
                answer = await o.f()
                return answer
    
            result = yield f()
            self.assertEqual(result, 42)
    
    
    if __name__ == "__main__":
        unittest.main()
    tornado-6.1.0/tornado/test/csv_translations/000077500000000000000000000000001374705040500212125ustar00rootroot00000000000000tornado-6.1.0/tornado/test/csv_translations/fr_FR.csv000066400000000000000000000000221374705040500227170ustar00rootroot00000000000000"school","école"
    tornado-6.1.0/tornado/test/curl_httpclient_test.py000066400000000000000000000103171374705040500224340ustar00rootroot00000000000000from hashlib import md5
    import unittest
    
    from tornado.escape import utf8
    from tornado.testing import AsyncHTTPTestCase
    from tornado.test import httpclient_test
    from tornado.web import Application, RequestHandler
    
    
    try:
        import pycurl
    except ImportError:
        pycurl = None  # type: ignore
    
    if pycurl is not None:
        from tornado.curl_httpclient import CurlAsyncHTTPClient
    
    
    @unittest.skipIf(pycurl is None, "pycurl module not present")
    class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
        def get_http_client(self):
            client = CurlAsyncHTTPClient(defaults=dict(allow_ipv6=False))
            # make sure AsyncHTTPClient magic doesn't give us the wrong class
            self.assertTrue(isinstance(client, CurlAsyncHTTPClient))
            return client
    
    
    class DigestAuthHandler(RequestHandler):
        def initialize(self, username, password):
            self.username = username
            self.password = password
    
        def get(self):
            realm = "test"
            opaque = "asdf"
            # Real implementations would use a random nonce.
            nonce = "1234"
    
            auth_header = self.request.headers.get("Authorization", None)
            if auth_header is not None:
                auth_mode, params = auth_header.split(" ", 1)
                assert auth_mode == "Digest"
                param_dict = {}
                for pair in params.split(","):
                    k, v = pair.strip().split("=", 1)
                    if v[0] == '"' and v[-1] == '"':
                        v = v[1:-1]
                    param_dict[k] = v
                assert param_dict["realm"] == realm
                assert param_dict["opaque"] == opaque
                assert param_dict["nonce"] == nonce
                assert param_dict["username"] == self.username
                assert param_dict["uri"] == self.request.path
                h1 = md5(
                    utf8("%s:%s:%s" % (self.username, realm, self.password))
                ).hexdigest()
                h2 = md5(
                    utf8("%s:%s" % (self.request.method, self.request.path))
                ).hexdigest()
                digest = md5(utf8("%s:%s:%s" % (h1, nonce, h2))).hexdigest()
                if digest == param_dict["response"]:
                    self.write("ok")
                else:
                    self.write("fail")
            else:
                self.set_status(401)
                self.set_header(
                    "WWW-Authenticate",
                    'Digest realm="%s", nonce="%s", opaque="%s"' % (realm, nonce, opaque),
                )
    
    
    class CustomReasonHandler(RequestHandler):
        def get(self):
            self.set_status(200, "Custom reason")
    
    
    class CustomFailReasonHandler(RequestHandler):
        def get(self):
            self.set_status(400, "Custom reason")
    
    
    @unittest.skipIf(pycurl is None, "pycurl module not present")
    class CurlHTTPClientTestCase(AsyncHTTPTestCase):
        def setUp(self):
            super().setUp()
            self.http_client = self.create_client()
    
        def get_app(self):
            return Application(
                [
                    ("/digest", DigestAuthHandler, {"username": "foo", "password": "bar"}),
                    (
                        "/digest_non_ascii",
                        DigestAuthHandler,
                        {"username": "foo", "password": "barユ£"},
                    ),
                    ("/custom_reason", CustomReasonHandler),
                    ("/custom_fail_reason", CustomFailReasonHandler),
                ]
            )
    
        def create_client(self, **kwargs):
            return CurlAsyncHTTPClient(
                force_instance=True, defaults=dict(allow_ipv6=False), **kwargs
            )
    
        def test_digest_auth(self):
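            # libcurl performs the digest handshake itself: it receives the
            # 401 challenge and retries with a computed Authorization header,
            # so fetch() returns only the final response.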
            response = self.fetch(
                "/digest", auth_mode="digest", auth_username="foo", auth_password="bar"
            )
            self.assertEqual(response.body, b"ok")
    
        def test_custom_reason(self):
            response = self.fetch("/custom_reason")
            self.assertEqual(response.reason, "Custom reason")
    
        def test_fail_custom_reason(self):
            response = self.fetch("/custom_fail_reason")
            self.assertEqual(str(response.error), "HTTP 400: Custom reason")
    
        def test_digest_auth_non_ascii(self):
            response = self.fetch(
                "/digest_non_ascii",
                auth_mode="digest",
                auth_username="foo",
                auth_password="barユ£",
            )
            self.assertEqual(response.body, b"ok")
    tornado-6.1.0/tornado/test/escape_test.py000066400000000000000000000301241374705040500204670ustar00rootroot00000000000000import unittest
    
    import tornado.escape
    from tornado.escape import (
        utf8,
        xhtml_escape,
        xhtml_unescape,
        url_escape,
        url_unescape,
        to_unicode,
        json_decode,
        json_encode,
        squeeze,
        recursive_unicode,
    )
    from tornado.util import unicode_type
    
    from typing import List, Tuple, Union, Dict, Any  # noqa: F401
    
    linkify_tests = [
        # (input, linkify_kwargs, expected_output)
        (
            "hello http://world.com/!",
            {},
            u'hello <a href="http://world.com/">http://world.com/</a>!',
        ),
        (
            "hello http://world.com/with?param=true&stuff=yes",
            {},
            u'hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>',  # noqa: E501
        ),
        # an opened paren followed by many chars killed Gruber's regex
        (
            "http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            {},
            u'<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',  # noqa: E501
        ),
        # as did too many dots at the end
        (
            "http://url.com/withmany.......................................",
            {},
            u'<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................',  # noqa: E501
        ),
        (
            "http://url.com/withmany((((((((((((((((((((((((((((((((((a)",
            {},
            u'<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)',  # noqa: E501
        ),
        # some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
        # plus a few extras (such as multiple parentheses).
        (
            "http://foo.com/blah_blah",
            {},
            u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>',
        ),
        (
            "http://foo.com/blah_blah/",
            {},
            u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>',
        ),
        (
            "(Something like http://foo.com/blah_blah)",
            {},
            u'(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)',
        ),
        (
            "http://foo.com/blah_blah_(wikipedia)",
            {},
            u'<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>',  # noqa: E501
        ),
        (
            "http://foo.com/blah_(blah)_(wikipedia)_blah",
            {},
            u'<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>',  # noqa: E501
        ),
        (
            "(Something like http://foo.com/blah_blah_(wikipedia))",
            {},
            u'(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)',  # noqa: E501
        ),
        (
            "http://foo.com/blah_blah.",
            {},
            u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.',
        ),
        (
            "http://foo.com/blah_blah/.",
            {},
            u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.',
        ),
        (
            "<http://foo.com/blah_blah>",
            {},
            u'&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;',
        ),
        (
            "<http://foo.com/blah_blah/>",
            {},
            u'&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;',
        ),
        (
            "http://foo.com/blah_blah,",
            {},
            u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,',
        ),
        (
            "http://www.example.com/wpstyle/?p=364.",
            {},
            u'<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.',  # noqa: E501
        ),
        (
            "rdar://1234",
            {"permitted_protocols": ["http", "rdar"]},
            u'<a href="rdar://1234">rdar://1234</a>',
        ),
        (
            "rdar:/1234",
            {"permitted_protocols": ["rdar"]},
            u'<a href="rdar:/1234">rdar:/1234</a>',
        ),
        (
            "http://userid:password@example.com:8080",
            {},
            u'<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>',  # noqa: E501
        ),
        (
            "http://userid@example.com",
            {},
            u'<a href="http://userid@example.com">http://userid@example.com</a>',
        ),
        (
            "http://userid@example.com:8080",
            {},
            u'<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>',
        ),
        (
            "http://userid:password@example.com",
            {},
            u'<a href="http://userid:password@example.com">http://userid:password@example.com</a>',
        ),
        (
            "message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
            {"permitted_protocols": ["http", "message"]},
            u'<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">'
            u"message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>",
        ),
        (
            u"http://\u27a1.ws/\u4a39",
            {},
            u'<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>',
        ),
        (
            "<tag>http://example.com</tag>",
            {},
            u'&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;',
        ),
        (
            "Just a www.example.com link.",
            {},
            u'Just a <a href="http://www.example.com">www.example.com</a> link.',
        ),
        (
            "Just a www.example.com link.",
            {"require_protocol": True},
            u"Just a www.example.com link.",
        ),
        (
            "A http://reallylong.com/link/that/exceedsthelenglimit.html",
            {"require_protocol": True, "shorten": True},
            u'A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>',  # noqa: E501
        ),
        (
            "A http://reallylongdomainnamethatwillbetoolong.com/hi!",
            {"shorten": True},
            u'A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!',  # noqa: E501
        ),
        (
            "A file:///passwords.txt and http://web.com link",
            {},
            u'A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link',
        ),
        (
            "A file:///passwords.txt and http://web.com link",
            {"permitted_protocols": ["file"]},
            u'A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link',
        ),
        (
            "www.external-link.com",
            {"extra_params": 'rel="nofollow" class="external"'},
            u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>',  # noqa: E501
        ),
        (
            "www.external-link.com and www.internal-link.com/blogs extra",
            {
                "extra_params": lambda href: 'class="internal"'
                if href.startswith("http://www.internal-link.com")
                else 'rel="nofollow" class="external"'
            },
            u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'  # noqa: E501
            u' and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra',  # noqa: E501
        ),
        (
            "www.external-link.com",
            {"extra_params": lambda href: '    rel="nofollow" class="external"  '},
            u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>',  # noqa: E501
        ),
    ]  # type: List[Tuple[Union[str, bytes], Dict[str, Any], str]]
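
    # For reference (a sketch, not part of the test data): linkify escapes its
    # input and wraps each URL it finds in an anchor tag, e.g.
    #     tornado.escape.linkify("see http://example.com")
    #     returns u'see <a href="http://example.com">http://example.com</a>'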
    
    
    class EscapeTestCase(unittest.TestCase):
        def test_linkify(self):
            for text, kwargs, html in linkify_tests:
                linked = tornado.escape.linkify(text, **kwargs)
                self.assertEqual(linked, html)
    
        def test_xhtml_escape(self):
            tests = [
                ("<foo>", "&lt;foo&gt;"),
                (u"<foo>", u"&lt;foo&gt;"),
                (b"<foo>", b"&lt;foo&gt;"),
                ("<>&\"'", "&lt;&gt;&amp;&quot;&#39;"),
                ("&amp;", "&amp;amp;"),
                (u"<\u00e9>", u"&lt;\u00e9&gt;"),
                (b"<\xc3\xa9>", b"&lt;\xc3\xa9&gt;"),
            ]  # type: List[Tuple[Union[str, bytes], Union[str, bytes]]]
            for unescaped, escaped in tests:
                self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
                self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
    
        def test_xhtml_unescape_numeric(self):
            tests = [
                ("foo&#32;bar", "foo bar"),
                ("foo&#x20;bar", "foo bar"),
                ("foo&#X20;bar", "foo bar"),
                ("foo&#xabc;bar", u"foo\u0abcbar"),
                ("foo&#xyz;bar", "foo&#xyz;bar"),  # invalid encoding
                ("foo&#;bar", "foo&#;bar"),  # invalid encoding
                ("foo&#x;bar", "foo&#x;bar"),  # invalid encoding
            ]
            for escaped, unescaped in tests:
                self.assertEqual(unescaped, xhtml_unescape(escaped))
    
        def test_url_escape_unicode(self):
            tests = [
                # byte strings are passed through as-is
                (u"\u00e9".encode("utf8"), "%C3%A9"),
                (u"\u00e9".encode("latin1"), "%E9"),
                # unicode strings become utf8
                (u"\u00e9", "%C3%A9"),
            ]  # type: List[Tuple[Union[str, bytes], str]]
            for unescaped, escaped in tests:
                self.assertEqual(url_escape(unescaped), escaped)
    
        def test_url_unescape_unicode(self):
            tests = [
                ("%C3%A9", u"\u00e9", "utf8"),
                ("%C3%A9", u"\u00c3\u00a9", "latin1"),
                ("%C3%A9", utf8(u"\u00e9"), None),
            ]
            for escaped, unescaped, encoding in tests:
                # input strings to url_unescape should only contain ascii
                # characters, but make sure the function accepts both byte
                # and unicode strings.
                self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
                self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
    
        def test_url_escape_quote_plus(self):
            unescaped = "+ #%"
            plus_escaped = "%2B+%23%25"
            escaped = "%2B%20%23%25"
            self.assertEqual(url_escape(unescaped), plus_escaped)
            self.assertEqual(url_escape(unescaped, plus=False), escaped)
            self.assertEqual(url_unescape(plus_escaped), unescaped)
            self.assertEqual(url_unescape(escaped, plus=False), unescaped)
            self.assertEqual(url_unescape(plus_escaped, encoding=None), utf8(unescaped))
            self.assertEqual(
                url_unescape(escaped, encoding=None, plus=False), utf8(unescaped)
            )
    
        def test_escape_return_types(self):
            # On python2 the escape methods should generally return the same
            # type as their argument
            self.assertEqual(type(xhtml_escape("foo")), str)
            self.assertEqual(type(xhtml_escape(u"foo")), unicode_type)
    
        def test_json_decode(self):
            # json_decode accepts both bytes and unicode, but strings it returns
            # are always unicode.
            self.assertEqual(json_decode(b'"foo"'), u"foo")
            self.assertEqual(json_decode(u'"foo"'), u"foo")
    
            # Non-ascii bytes are interpreted as utf8
            self.assertEqual(json_decode(utf8(u'"\u00e9"')), u"\u00e9")
    
        def test_json_encode(self):
            # json deals with strings, not bytes.  On python 2 byte strings will
            # convert automatically if they are utf8; on python 3 byte strings
            # are not allowed.
            self.assertEqual(json_decode(json_encode(u"\u00e9")), u"\u00e9")
            if bytes is str:
                self.assertEqual(json_decode(json_encode(utf8(u"\u00e9"))), u"\u00e9")
                self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
    
        def test_squeeze(self):
            self.assertEqual(
                squeeze(u"sequences     of    whitespace   chars"),
                u"sequences of whitespace chars",
            )
    
        def test_recursive_unicode(self):
            tests = {
                "dict": {b"foo": b"bar"},
                "list": [b"foo", b"bar"],
                "tuple": (b"foo", b"bar"),
                "bytes": b"foo",
            }
            self.assertEqual(recursive_unicode(tests["dict"]), {u"foo": u"bar"})
            self.assertEqual(recursive_unicode(tests["list"]), [u"foo", u"bar"])
            self.assertEqual(recursive_unicode(tests["tuple"]), (u"foo", u"bar"))
            self.assertEqual(recursive_unicode(tests["bytes"]), u"foo")
    tornado-6.1.0/tornado/test/gen_test.py000066400000000000000000001020561374705040500200040ustar00rootroot00000000000000import asyncio
    from concurrent import futures
    import gc
    import datetime
    import platform
    import sys
    import time
    import weakref
    import unittest
    
    from tornado.concurrent import Future
    from tornado.log import app_log
    from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
    from tornado.test.util import skipOnTravis, skipNotCPython
    from tornado.web import Application, RequestHandler, HTTPError
    
    from tornado import gen
    
    try:
        import contextvars
    except ImportError:
        contextvars = None  # type: ignore
    
    import typing
    
    if typing.TYPE_CHECKING:
        from typing import List, Optional  # noqa: F401
    
    
    class GenBasicTest(AsyncTestCase):
        @gen.coroutine
        def delay(self, iterations, arg):
            """Returns arg after a number of IOLoop iterations."""
            for i in range(iterations):
                yield gen.moment
            raise gen.Return(arg)
    
        @gen.coroutine
        def async_future(self, result):
            yield gen.moment
            return result
    
        @gen.coroutine
        def async_exception(self, e):
            yield gen.moment
            raise e
    
        @gen.coroutine
        def add_one_async(self, x):
            yield gen.moment
            raise gen.Return(x + 1)
    
        def test_no_yield(self):
            @gen.coroutine
            def f():
                pass
    
            self.io_loop.run_sync(f)
    
        def test_exception_phase1(self):
            @gen.coroutine
            def f():
                1 / 0
    
            self.assertRaises(ZeroDivisionError, self.io_loop.run_sync, f)
    
        def test_exception_phase2(self):
            @gen.coroutine
            def f():
                yield gen.moment
                1 / 0
    
            self.assertRaises(ZeroDivisionError, self.io_loop.run_sync, f)
    
        def test_bogus_yield(self):
            @gen.coroutine
            def f():
                yield 42
    
            self.assertRaises(gen.BadYieldError, self.io_loop.run_sync, f)
    
        def test_bogus_yield_tuple(self):
            @gen.coroutine
            def f():
                yield (1, 2)
    
            self.assertRaises(gen.BadYieldError, self.io_loop.run_sync, f)
    
        def test_reuse(self):
            @gen.coroutine
            def f():
                yield gen.moment
    
            self.io_loop.run_sync(f)
            self.io_loop.run_sync(f)
    
        def test_none(self):
            @gen.coroutine
            def f():
                yield None
    
            self.io_loop.run_sync(f)
    
        def test_multi(self):
            @gen.coroutine
            def f():
                results = yield [self.add_one_async(1), self.add_one_async(2)]
                self.assertEqual(results, [2, 3])
    
            self.io_loop.run_sync(f)
    
        def test_multi_dict(self):
            @gen.coroutine
            def f():
                results = yield dict(foo=self.add_one_async(1), bar=self.add_one_async(2))
                self.assertEqual(results, dict(foo=2, bar=3))
    
            self.io_loop.run_sync(f)
    
        def test_multi_delayed(self):
            @gen.coroutine
            def f():
                # callbacks run at different times
                responses = yield gen.multi_future(
                    [self.delay(3, "v1"), self.delay(1, "v2")]
                )
                self.assertEqual(responses, ["v1", "v2"])
    
            self.io_loop.run_sync(f)
    
        def test_multi_dict_delayed(self):
            @gen.coroutine
            def f():
                # callbacks run at different times
                responses = yield gen.multi_future(
                    dict(foo=self.delay(3, "v1"), bar=self.delay(1, "v2"))
                )
                self.assertEqual(responses, dict(foo="v1", bar="v2"))
    
            self.io_loop.run_sync(f)
    
        @skipOnTravis
        @gen_test
        def test_multi_performance(self):
            # Yielding a list used to have quadratic performance; make
            # sure a large list stays reasonable.  On my laptop a list of
            # 2000 used to take 1.8s, now it takes 0.12.
            start = time.time()
            yield [gen.moment for i in range(2000)]
            end = time.time()
            self.assertLess(end - start, 1.0)
    
        @gen_test
        def test_multi_empty(self):
            # Empty lists or dicts should return the same type.
            x = yield []
            self.assertTrue(isinstance(x, list))
            y = yield {}
            self.assertTrue(isinstance(y, dict))
    
        @gen_test
        def test_future(self):
            result = yield self.async_future(1)
            self.assertEqual(result, 1)
    
        @gen_test
        def test_multi_future(self):
            results = yield [self.async_future(1), self.async_future(2)]
            self.assertEqual(results, [1, 2])
    
        @gen_test
        def test_multi_future_duplicate(self):
            # Note that this doesn't work with native coroutines, only with
            # decorated coroutines.
            f = self.async_future(2)
            results = yield [self.async_future(1), f, self.async_future(3), f]
            self.assertEqual(results, [1, 2, 3, 2])
    
        @gen_test
        def test_multi_dict_future(self):
            results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
            self.assertEqual(results, dict(foo=1, bar=2))
    
        @gen_test
        def test_multi_exceptions(self):
            with ExpectLog(app_log, "Multiple exceptions in yield list"):
                with self.assertRaises(RuntimeError) as cm:
                    yield gen.Multi(
                        [
                            self.async_exception(RuntimeError("error 1")),
                            self.async_exception(RuntimeError("error 2")),
                        ]
                    )
            self.assertEqual(str(cm.exception), "error 1")
    
            # With only one exception, no error is logged.
            with self.assertRaises(RuntimeError):
                yield gen.Multi(
                    [self.async_exception(RuntimeError("error 1")), self.async_future(2)]
                )
    
            # Exception logging may be explicitly quieted.
            with self.assertRaises(RuntimeError):
                yield gen.Multi(
                    [
                        self.async_exception(RuntimeError("error 1")),
                        self.async_exception(RuntimeError("error 2")),
                    ],
                    quiet_exceptions=RuntimeError,
                )
    
        @gen_test
        def test_multi_future_exceptions(self):
            with ExpectLog(app_log, "Multiple exceptions in yield list"):
                with self.assertRaises(RuntimeError) as cm:
                    yield [
                        self.async_exception(RuntimeError("error 1")),
                        self.async_exception(RuntimeError("error 2")),
                    ]
            self.assertEqual(str(cm.exception), "error 1")
    
            # With only one exception, no error is logged.
            with self.assertRaises(RuntimeError):
                yield [self.async_exception(RuntimeError("error 1")), self.async_future(2)]
    
            # Exception logging may be explicitly quieted.
            with self.assertRaises(RuntimeError):
                yield gen.multi_future(
                    [
                        self.async_exception(RuntimeError("error 1")),
                        self.async_exception(RuntimeError("error 2")),
                    ],
                    quiet_exceptions=RuntimeError,
                )
    
        def test_sync_raise_return(self):
            @gen.coroutine
            def f():
                raise gen.Return()
    
            self.io_loop.run_sync(f)
    
        def test_async_raise_return(self):
            @gen.coroutine
            def f():
                yield gen.moment
                raise gen.Return()
    
            self.io_loop.run_sync(f)
    
        def test_sync_raise_return_value(self):
            @gen.coroutine
            def f():
                raise gen.Return(42)
    
            self.assertEqual(42, self.io_loop.run_sync(f))
    
        def test_sync_raise_return_value_tuple(self):
            @gen.coroutine
            def f():
                raise gen.Return((1, 2))
    
            self.assertEqual((1, 2), self.io_loop.run_sync(f))
    
        def test_async_raise_return_value(self):
            @gen.coroutine
            def f():
                yield gen.moment
                raise gen.Return(42)
    
            self.assertEqual(42, self.io_loop.run_sync(f))
    
        def test_async_raise_return_value_tuple(self):
            @gen.coroutine
            def f():
                yield gen.moment
                raise gen.Return((1, 2))
    
            self.assertEqual((1, 2), self.io_loop.run_sync(f))
    
    
    class GenCoroutineTest(AsyncTestCase):
        def setUp(self):
            # Stray StopIteration exceptions can lead to tests exiting prematurely,
            # so we need explicit checks here to make sure the tests run all
            # the way through.
            self.finished = False
            super().setUp()
    
        def tearDown(self):
            super().tearDown()
            assert self.finished
    
        def test_attributes(self):
            self.finished = True
    
            def f():
                yield gen.moment
    
            coro = gen.coroutine(f)
            self.assertEqual(coro.__name__, f.__name__)
            self.assertEqual(coro.__module__, f.__module__)
            self.assertIs(coro.__wrapped__, f)  # type: ignore
    
        def test_is_coroutine_function(self):
            self.finished = True
    
            def f():
                yield gen.moment
    
            coro = gen.coroutine(f)
            self.assertFalse(gen.is_coroutine_function(f))
            self.assertTrue(gen.is_coroutine_function(coro))
            self.assertFalse(gen.is_coroutine_function(coro()))
    
        @gen_test
        def test_sync_gen_return(self):
            @gen.coroutine
            def f():
                raise gen.Return(42)
    
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_async_gen_return(self):
            @gen.coroutine
            def f():
                yield gen.moment
                raise gen.Return(42)
    
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_sync_return(self):
            @gen.coroutine
            def f():
                return 42
    
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_async_return(self):
            @gen.coroutine
            def f():
                yield gen.moment
                return 42
    
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_async_early_return(self):
            # A yield statement exists but is not executed, which means
            # this function "returns" via an exception.  This exception
            # doesn't happen before the exception handling is set up.
            @gen.coroutine
            def f():
                if True:
                    return 42
                yield gen.Task(self.io_loop.add_callback)
    
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_async_await(self):
            @gen.coroutine
            def f1():
                yield gen.moment
                raise gen.Return(42)
    
            # This test verifies that an async function can await a
            # yield-based gen.coroutine, and that a gen.coroutine
            # (the test method itself) can yield an async function.
            async def f2():
                result = await f1()
                return result
    
            result = yield f2()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_asyncio_sleep_zero(self):
            # asyncio.sleep(0) turns into a special case (equivalent to
            # `yield None`)
            async def f():
                import asyncio
    
                await asyncio.sleep(0)
                return 42
    
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_async_await_mixed_multi_native_future(self):
            @gen.coroutine
            def f1():
                yield gen.moment
    
            async def f2():
                await f1()
                return 42
    
            @gen.coroutine
            def f3():
                yield gen.moment
                raise gen.Return(43)
    
            results = yield [f2(), f3()]
            self.assertEqual(results, [42, 43])
            self.finished = True
    
        @gen_test
        def test_async_with_timeout(self):
            async def f1():
                return 42
    
            result = yield gen.with_timeout(datetime.timedelta(hours=1), f1())
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_sync_return_no_value(self):
            @gen.coroutine
            def f():
                return
    
            result = yield f()
            self.assertEqual(result, None)
            self.finished = True
    
        @gen_test
        def test_async_return_no_value(self):
            # Without a return value we don't need python 3.3.
            @gen.coroutine
            def f():
                yield gen.moment
                return
    
            result = yield f()
            self.assertEqual(result, None)
            self.finished = True
    
        @gen_test
        def test_sync_raise(self):
            @gen.coroutine
            def f():
                1 / 0
    
            # The exception is raised when the future is yielded
            # (or equivalently when its result method is called),
            # not when the function itself is called.
            future = f()
            with self.assertRaises(ZeroDivisionError):
                yield future
            self.finished = True
    
        @gen_test
        def test_async_raise(self):
            @gen.coroutine
            def f():
                yield gen.moment
                1 / 0
    
            future = f()
            with self.assertRaises(ZeroDivisionError):
                yield future
            self.finished = True
    
        @gen_test
        def test_replace_yieldpoint_exception(self):
            # Test exception handling: a coroutine can catch one exception
            # raised by a yield point and raise a different one.
            @gen.coroutine
            def f1():
                1 / 0
    
            @gen.coroutine
            def f2():
                try:
                    yield f1()
                except ZeroDivisionError:
                    raise KeyError()
    
            future = f2()
            with self.assertRaises(KeyError):
                yield future
            self.finished = True
    
        @gen_test
        def test_swallow_yieldpoint_exception(self):
            # Test exception handling: a coroutine can catch an exception
            # raised by a yield point and not raise a different one.
            @gen.coroutine
            def f1():
                1 / 0
    
            @gen.coroutine
            def f2():
                try:
                    yield f1()
                except ZeroDivisionError:
                    raise gen.Return(42)
    
            result = yield f2()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_moment(self):
            calls = []
    
            @gen.coroutine
            def f(name, yieldable):
                for i in range(5):
                    calls.append(name)
                    yield yieldable
    
            # First, confirm the behavior without moment: each coroutine
            # monopolizes the event loop until it finishes.
            immediate = Future()  # type: Future[None]
            immediate.set_result(None)
            yield [f("a", immediate), f("b", immediate)]
            self.assertEqual("".join(calls), "aaaaabbbbb")
    
            # With moment, they take turns.
            calls = []
            yield [f("a", gen.moment), f("b", gen.moment)]
            self.assertEqual("".join(calls), "ababababab")

            calls = []
            yield [f("a", gen.moment), f("b", immediate)]
            self.assertEqual("".join(calls), "abbbbbaaaa")
            self.finished = True
    
        @gen_test
        def test_sleep(self):
            yield gen.sleep(0.01)
            self.finished = True
    
        @gen_test
        def test_py3_leak_exception_context(self):
            class LeakedException(Exception):
                pass
    
            @gen.coroutine
            def inner(iteration):
                raise LeakedException(iteration)
    
            try:
                yield inner(1)
            except LeakedException as e:
                self.assertEqual(str(e), "1")
                self.assertIsNone(e.__context__)
    
            try:
                yield inner(2)
            except LeakedException as e:
                self.assertEqual(str(e), "2")
                self.assertIsNone(e.__context__)
    
            self.finished = True
    
        @skipNotCPython
        @unittest.skipIf(
            (3,) < sys.version_info < (3, 6), "asyncio.Future has reference cycles"
        )
        def test_coroutine_refcounting(self):
            # On CPython, tasks and their arguments should be released immediately
            # without waiting for garbage collection.
            @gen.coroutine
            def inner():
                class Foo(object):
                    pass
    
                local_var = Foo()
                self.local_ref = weakref.ref(local_var)
    
                def dummy():
                    pass
    
                yield gen.coroutine(dummy)()
                raise ValueError("Some error")
    
            @gen.coroutine
            def inner2():
                try:
                    yield inner()
                except ValueError:
                    pass
    
            self.io_loop.run_sync(inner2, timeout=3)
    
            self.assertIs(self.local_ref(), None)
            self.finished = True
    
        def test_asyncio_future_debug_info(self):
            self.finished = True
            # Enable debug mode
            asyncio_loop = asyncio.get_event_loop()
            self.addCleanup(asyncio_loop.set_debug, asyncio_loop.get_debug())
            asyncio_loop.set_debug(True)
    
            def f():
                yield gen.moment
    
            coro = gen.coroutine(f)()
            self.assertIsInstance(coro, asyncio.Future)
            # We expect the coroutine repr() to show the place where
            # it was instantiated
            expected = "created at %s:%d" % (__file__, f.__code__.co_firstlineno + 3)
            actual = repr(coro)
            self.assertIn(expected, actual)
    
        @gen_test
        def test_asyncio_gather(self):
            # This demonstrates that tornado coroutines can be understood
            # by asyncio (This failed prior to Tornado 5.0).
            @gen.coroutine
            def f():
                yield gen.moment
                raise gen.Return(1)
    
            ret = yield asyncio.gather(f(), f())
            self.assertEqual(ret, [1, 1])
            self.finished = True
    
    
    class GenCoroutineSequenceHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            yield gen.moment
            self.write("1")
            yield gen.moment
            self.write("2")
            yield gen.moment
            self.finish("3")
    
    
    class GenCoroutineUnfinishedSequenceHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            yield gen.moment
            self.write("1")
            yield gen.moment
            self.write("2")
            yield gen.moment
            # just write, don't finish
            self.write("3")
    
    
    # "Undecorated" here refers to the absence of @asynchronous.
    class UndecoratedCoroutinesHandler(RequestHandler):
        @gen.coroutine
        def prepare(self):
            self.chunks = []  # type: List[str]
            yield gen.moment
            self.chunks.append("1")
    
        @gen.coroutine
        def get(self):
            self.chunks.append("2")
            yield gen.moment
            self.chunks.append("3")
            yield gen.moment
            self.write("".join(self.chunks))
    
    
    class AsyncPrepareErrorHandler(RequestHandler):
        @gen.coroutine
        def prepare(self):
            yield gen.moment
            raise HTTPError(403)
    
        def get(self):
            self.finish("ok")
    
    
    class NativeCoroutineHandler(RequestHandler):
        async def get(self):
            await asyncio.sleep(0)
            self.write("ok")
    
    
    class GenWebTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application(
                [
                    ("/coroutine_sequence", GenCoroutineSequenceHandler),
                    (
                        "/coroutine_unfinished_sequence",
                        GenCoroutineUnfinishedSequenceHandler,
                    ),
                    ("/undecorated_coroutine", UndecoratedCoroutinesHandler),
                    ("/async_prepare_error", AsyncPrepareErrorHandler),
                    ("/native_coroutine", NativeCoroutineHandler),
                ]
            )
    
        def test_coroutine_sequence_handler(self):
            response = self.fetch("/coroutine_sequence")
            self.assertEqual(response.body, b"123")
    
        def test_coroutine_unfinished_sequence_handler(self):
            response = self.fetch("/coroutine_unfinished_sequence")
            self.assertEqual(response.body, b"123")
    
        def test_undecorated_coroutines(self):
            response = self.fetch("/undecorated_coroutine")
            self.assertEqual(response.body, b"123")
    
        def test_async_prepare_error_handler(self):
            response = self.fetch("/async_prepare_error")
            self.assertEqual(response.code, 403)
    
        def test_native_coroutine_handler(self):
            response = self.fetch("/native_coroutine")
            self.assertEqual(response.code, 200)
            self.assertEqual(response.body, b"ok")
    
    
    class WithTimeoutTest(AsyncTestCase):
        @gen_test
        def test_timeout(self):
            with self.assertRaises(gen.TimeoutError):
                yield gen.with_timeout(datetime.timedelta(seconds=0.1), Future())
    
        @gen_test
        def test_completes_before_timeout(self):
            future = Future()  # type: Future[str]
            self.io_loop.add_timeout(
                datetime.timedelta(seconds=0.1), lambda: future.set_result("asdf")
            )
            result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
            self.assertEqual(result, "asdf")
    
        @gen_test
        def test_fails_before_timeout(self):
            future = Future()  # type: Future[str]
            self.io_loop.add_timeout(
                datetime.timedelta(seconds=0.1),
                lambda: future.set_exception(ZeroDivisionError()),
            )
            with self.assertRaises(ZeroDivisionError):
                yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
    
        @gen_test
        def test_already_resolved(self):
            future = Future()  # type: Future[str]
            future.set_result("asdf")
            result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
            self.assertEqual(result, "asdf")
    
        @gen_test
        def test_timeout_concurrent_future(self):
            # A concurrent future that does not resolve before the timeout.
            with futures.ThreadPoolExecutor(1) as executor:
                with self.assertRaises(gen.TimeoutError):
                    yield gen.with_timeout(
                        self.io_loop.time(), executor.submit(time.sleep, 0.1)
                    )
    
        @gen_test
        def test_completed_concurrent_future(self):
            # A concurrent future that is resolved before we even submit it
            # to with_timeout.
            with futures.ThreadPoolExecutor(1) as executor:
    
                def dummy():
                    pass
    
                f = executor.submit(dummy)
                f.result()  # wait for completion
                yield gen.with_timeout(datetime.timedelta(seconds=3600), f)
    
        @gen_test
        def test_normal_concurrent_future(self):
            # A concurrent future that resolves while waiting for the timeout.
            with futures.ThreadPoolExecutor(1) as executor:
                yield gen.with_timeout(
                    datetime.timedelta(seconds=3600),
                    executor.submit(lambda: time.sleep(0.01)),
                )
    
    
    class WaitIteratorTest(AsyncTestCase):
        @gen_test
        def test_empty_iterator(self):
            g = gen.WaitIterator()
            self.assertTrue(g.done(), "empty generator iterated")
    
            with self.assertRaises(ValueError):
                g = gen.WaitIterator(Future(), bar=Future())
    
            self.assertEqual(g.current_index, None, "bad nil current index")
            self.assertEqual(g.current_future, None, "bad nil current future")
    
        @gen_test
        def test_already_done(self):
            f1 = Future()  # type: Future[int]
            f2 = Future()  # type: Future[int]
            f3 = Future()  # type: Future[int]
            f1.set_result(24)
            f2.set_result(42)
            f3.set_result(84)
    
            g = gen.WaitIterator(f1, f2, f3)
            i = 0
            while not g.done():
                r = yield g.next()
                # Order is not guaranteed, but the current implementation
                # preserves ordering of already-done Futures.
                if i == 0:
                    self.assertEqual(g.current_index, 0)
                    self.assertIs(g.current_future, f1)
                    self.assertEqual(r, 24)
                elif i == 1:
                    self.assertEqual(g.current_index, 1)
                    self.assertIs(g.current_future, f2)
                    self.assertEqual(r, 42)
                elif i == 2:
                    self.assertEqual(g.current_index, 2)
                    self.assertIs(g.current_future, f3)
                    self.assertEqual(r, 84)
                i += 1
    
            self.assertEqual(g.current_index, None, "bad nil current index")
            self.assertEqual(g.current_future, None, "bad nil current future")
    
            dg = gen.WaitIterator(f1=f1, f2=f2)
    
            while not dg.done():
                dr = yield dg.next()
                if dg.current_index == "f1":
                    self.assertTrue(
                        dg.current_future == f1 and dr == 24,
                        "WaitIterator dict status incorrect",
                    )
                elif dg.current_index == "f2":
                    self.assertTrue(
                        dg.current_future == f2 and dr == 42,
                        "WaitIterator dict status incorrect",
                    )
                else:
                    self.fail("got bad WaitIterator index {}".format(dg.current_index))
    
                i += 1
    
            self.assertEqual(dg.current_index, None, "bad nil current index")
            self.assertEqual(dg.current_future, None, "bad nil current future")
    
        def finish_coroutines(self, iteration, futures):
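            # Resolve (or fail) the futures at fixed iteration counts,
            # rescheduling itself once per IOLoop iteration until iteration 8.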
            if iteration == 3:
                futures[2].set_result(24)
            elif iteration == 5:
                futures[0].set_exception(ZeroDivisionError())
            elif iteration == 8:
                futures[1].set_result(42)
                futures[3].set_result(84)
    
            if iteration < 8:
                self.io_loop.add_callback(self.finish_coroutines, iteration + 1, futures)
    
        @gen_test
        def test_iterator(self):
            futures = [Future(), Future(), Future(), Future()]  # type: List[Future[int]]
    
            self.finish_coroutines(0, futures)
    
            g = gen.WaitIterator(*futures)
    
            i = 0
            while not g.done():
                try:
                    r = yield g.next()
                except ZeroDivisionError:
                    self.assertIs(g.current_future, futures[0], "exception future invalid")
                else:
                    if i == 0:
                        self.assertEqual(r, 24, "iterator value incorrect")
                        self.assertEqual(g.current_index, 2, "wrong index")
                    elif i == 2:
                        self.assertEqual(r, 42, "iterator value incorrect")
                        self.assertEqual(g.current_index, 1, "wrong index")
                    elif i == 3:
                        self.assertEqual(r, 84, "iterator value incorrect")
                        self.assertEqual(g.current_index, 3, "wrong index")
                i += 1
    
        @gen_test
        def test_iterator_async_await(self):
            # Recreate the previous test with py35 syntax. It's a little clunky
            # because of the way the previous test handles an exception on
            # a single iteration.
            futures = [Future(), Future(), Future(), Future()]  # type: List[Future[int]]
            self.finish_coroutines(0, futures)
            self.finished = False
    
            async def f():
                i = 0
                g = gen.WaitIterator(*futures)
                try:
                    async for r in g:
                        if i == 0:
                            self.assertEqual(r, 24, "iterator value incorrect")
                            self.assertEqual(g.current_index, 2, "wrong index")
                        else:
                            raise Exception("expected exception on iteration 1")
                        i += 1
                except ZeroDivisionError:
                    i += 1
                async for r in g:
                    if i == 2:
                        self.assertEqual(r, 42, "iterator value incorrect")
                        self.assertEqual(g.current_index, 1, "wrong index")
                    elif i == 3:
                        self.assertEqual(r, 84, "iterator value incorrect")
                        self.assertEqual(g.current_index, 3, "wrong index")
                    else:
                        raise Exception("didn't expect iteration %d" % i)
                    i += 1
                self.finished = True
    
            yield f()
            self.assertTrue(self.finished)
    
        @gen_test
        def test_no_ref(self):
            # In this usage, there is no direct hard reference to the
            # WaitIterator itself, only the Future it returns. Since
            # WaitIterator uses weak references internally to improve GC
            # performance, this used to cause problems.
            yield gen.with_timeout(
                datetime.timedelta(seconds=0.1), gen.WaitIterator(gen.sleep(0)).next()
            )
    
    
    class RunnerGCTest(AsyncTestCase):
        def is_pypy3(self):
            return platform.python_implementation() == "PyPy" and sys.version_info > (3,)
    
        @gen_test
        def test_gc(self):
            # GitHub issue 1769: Runner objects can get GCed unexpectedly
            # while their future is alive.
            weakref_scope = [None]  # type: List[Optional[weakref.ReferenceType]]
    
            def callback():
                gc.collect(2)
                weakref_scope[0]().set_result(123)  # type: ignore
    
            @gen.coroutine
            def tester():
                fut = Future()  # type: Future[int]
                weakref_scope[0] = weakref.ref(fut)
                self.io_loop.add_callback(callback)
                yield fut
    
            yield gen.with_timeout(datetime.timedelta(seconds=0.2), tester())
    
        def test_gc_infinite_coro(self):
            # GitHub issue 2229: suspended coroutines should be GCed when
            # their loop is closed, even if they're involved in a reference
            # cycle.
            loop = self.get_new_ioloop()
            result = []  # type: List[Optional[bool]]
            wfut = []
    
            @gen.coroutine
            def infinite_coro():
                try:
                    while True:
                        yield gen.sleep(1e-3)
                        result.append(True)
                finally:
                    # coroutine finalizer
                    result.append(None)
    
            @gen.coroutine
            def do_something():
                fut = infinite_coro()
                fut._refcycle = fut  # type: ignore
                wfut.append(weakref.ref(fut))
                yield gen.sleep(0.2)
    
            loop.run_sync(do_something)
            loop.close()
            gc.collect()
            # Future was collected
            self.assertIs(wfut[0](), None)
            # At least one wakeup
            self.assertGreaterEqual(len(result), 2)
            if not self.is_pypy3():
                # coroutine finalizer was called (not on PyPy3 apparently)
                self.assertIs(result[-1], None)
    
        def test_gc_infinite_async_await(self):
            # Same as test_gc_infinite_coro, but with a `async def` function
            import asyncio
    
            async def infinite_coro(result):
                try:
                    while True:
                        await gen.sleep(1e-3)
                        result.append(True)
                finally:
                    # coroutine finalizer
                    result.append(None)
    
            loop = self.get_new_ioloop()
            result = []  # type: List[Optional[bool]]
            wfut = []
    
            @gen.coroutine
            def do_something():
                fut = asyncio.get_event_loop().create_task(infinite_coro(result))
                fut._refcycle = fut  # type: ignore
                wfut.append(weakref.ref(fut))
                yield gen.sleep(0.2)
    
            loop.run_sync(do_something)
            with ExpectLog("asyncio", "Task was destroyed but it is pending"):
                loop.close()
                gc.collect()
            # Future was collected
            self.assertIs(wfut[0](), None)
            # At least one wakeup and one finally
            self.assertGreaterEqual(len(result), 2)
            if not self.is_pypy3():
                # coroutine finalizer was called (not on PyPy3 apparently)
                self.assertIs(result[-1], None)
    
        def test_multi_moment(self):
            # Test gen.multi with moment
            # now that it's not a real Future
            @gen.coroutine
            def wait_a_moment():
                result = yield gen.multi([gen.moment, gen.moment])
                raise gen.Return(result)
    
            loop = self.get_new_ioloop()
            result = loop.run_sync(wait_a_moment)
            self.assertEqual(result, [None, None])
    
    
    if contextvars is not None:
        ctx_var = contextvars.ContextVar("ctx_var")  # type: contextvars.ContextVar[int]
    
    
    @unittest.skipIf(contextvars is None, "contextvars module not present")
    class ContextVarsTest(AsyncTestCase):
        async def native_root(self, x):
            ctx_var.set(x)
            await self.inner(x)
    
        @gen.coroutine
        def gen_root(self, x):
            ctx_var.set(x)
            yield
            yield self.inner(x)
    
        async def inner(self, x):
            self.assertEqual(ctx_var.get(), x)
            await self.gen_inner(x)
            self.assertEqual(ctx_var.get(), x)
    
            # IOLoop.run_in_executor doesn't automatically copy context
            ctx = contextvars.copy_context()
            await self.io_loop.run_in_executor(None, lambda: ctx.run(self.thread_inner, x))
            self.assertEqual(ctx_var.get(), x)
    
            # Neither does asyncio's run_in_executor.
            await asyncio.get_event_loop().run_in_executor(
                None, lambda: ctx.run(self.thread_inner, x)
            )
            self.assertEqual(ctx_var.get(), x)
    
        @gen.coroutine
        def gen_inner(self, x):
            self.assertEqual(ctx_var.get(), x)
            yield
            self.assertEqual(ctx_var.get(), x)
    
        def thread_inner(self, x):
            self.assertEqual(ctx_var.get(), x)
    
        @gen_test
        def test_propagate(self):
            # Verify that context vars get propagated across various
            # combinations of native and decorated coroutines.
            yield [
                self.native_root(1),
                self.native_root(2),
                self.gen_root(3),
                self.gen_root(4),
            ]
    
        @gen_test
        def test_reset(self):
            token = ctx_var.set(1)
            yield
            # reset asserts that we are still at the same level of the context tree,
            # so we must make sure that we maintain that property across yield.
            ctx_var.reset(token)
    
    
    if __name__ == "__main__":
        unittest.main()
    tornado-6.1.0/tornado/test/gettext_translations/000077500000000000000000000000001374705040500221035ustar00rootroot00000000000000tornado-6.1.0/tornado/test/gettext_translations/extract_me.py000066400000000000000000000012201374705040500246030ustar00rootroot00000000000000# flake8: noqa
    # Dummy source file to allow creation of the initial .po file in the
    # same way as a real project.  I'm not entirely sure about the real
    # workflow here, but this seems to work.
    #
    # 1) xgettext --language=Python --keyword=_:1,2 --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 extract_me.py -o tornado_test.po
    # 2) Edit tornado_test.po, setting CHARSET, Plural-Forms and setting msgstr
    # 3) msgfmt tornado_test.po -o tornado_test.mo
    # 4) Put the file in the proper location: $LANG/LC_MESSAGES
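    #
    # A minimal loading sketch (assuming the compiled catalog is installed
    # under a "gettext_translations" directory, as in this test tree):
    #
    #     import tornado.locale
    #     tornado.locale.load_gettext_translations("gettext_translations", "tornado_test")
    #     tornado.locale.get("fr_FR").translate("school")  # -> "école"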
    
    _("school")
    pgettext("law", "right")
    pgettext("good", "right")
    pgettext("organization", "club", "clubs", 1)
    pgettext("stick", "club", "clubs", 1)
    tornado-6.1.0/tornado/test/gettext_translations/fr_FR/000077500000000000000000000000001374705040500231015ustar00rootroot00000000000000tornado-6.1.0/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/000077500000000000000000000000001374705040500246665ustar00rootroot00000000000000tornado-6.1.0/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo000066400000000000000000000012311374705040500277250ustar00rootroot00000000000000Þ•L|¨
    ©	´¾ÖÝjîYaj|ƒgoodrightlawrightorganizationclubclubsschoolstickclubclubsProject-Id-Version: PACKAGE VERSION
    Report-Msgid-Bugs-To: 
    POT-Creation-Date: 2015-01-27 11:05+0300
    PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
    Last-Translator: FULL NAME 
    Language-Team: LANGUAGE 
    Language: 
    MIME-Version: 1.0
    Content-Type: text/plain; charset=utf-8
    Content-Transfer-Encoding: 8bit
    Plural-Forms: nplurals=2; plural=(n > 1);
    le bienle droitle clubles clubsécolele bâtonles bâtonstornado-6.1.0/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po000066400000000000000000000020311374705040500277270ustar00rootroot00000000000000# SOME DESCRIPTIVE TITLE.
    # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
    # This file is distributed under the same license as the PACKAGE package.
    # FIRST AUTHOR , YEAR.
    #
    #, fuzzy
    msgid ""
    msgstr ""
    "Project-Id-Version: PACKAGE VERSION\n"
    "Report-Msgid-Bugs-To: \n"
    "POT-Creation-Date: 2015-01-27 11:05+0300\n"
    "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
    "Last-Translator: FULL NAME \n"
    "Language-Team: LANGUAGE \n"
    "Language: \n"
    "MIME-Version: 1.0\n"
    "Content-Type: text/plain; charset=utf-8\n"
    "Content-Transfer-Encoding: 8bit\n"
    "Plural-Forms: nplurals=2; plural=(n > 1);\n"
    
    #: extract_me.py:11
    msgid "school"
    msgstr "école"
    
    #: extract_me.py:12
    msgctxt "law"
    msgid "right"
    msgstr "le droit"
    
    #: extract_me.py:13
    msgctxt "good"
    msgid "right"
    msgstr "le bien"
    
    #: extract_me.py:14
    msgctxt "organization"
    msgid "club"
    msgid_plural "clubs"
    msgstr[0] "le club"
    msgstr[1] "les clubs"
    
    #: extract_me.py:15
    msgctxt "stick"
    msgid "club"
    msgid_plural "clubs"
    msgstr[0] "le bâton"
    msgstr[1] "les bâtons"
    tornado-6.1.0/tornado/test/http1connection_test.py000066400000000000000000000036361374705040500223570ustar00rootroot00000000000000import socket
    import typing
    
    from tornado.http1connection import HTTP1Connection
    from tornado.httputil import HTTPMessageDelegate
    from tornado.iostream import IOStream
    from tornado.locks import Event
    from tornado.netutil import add_accept_handler
    from tornado.testing import AsyncTestCase, bind_unused_port, gen_test
    
    
    class HTTP1ConnectionTest(AsyncTestCase):
        code = None  # type: typing.Optional[int]
    
        def setUp(self):
            super().setUp()
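            # asyncSetUp is wrapped in @gen_test, so calling it here runs the
            # coroutine to completion on self.io_loop before each test starts.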
            self.asyncSetUp()
    
        @gen_test
        def asyncSetUp(self):
            listener, port = bind_unused_port()
            event = Event()
    
            def accept_callback(conn, addr):
                self.server_stream = IOStream(conn)
                self.addCleanup(self.server_stream.close)
                event.set()
    
            add_accept_handler(listener, accept_callback)
            self.client_stream = IOStream(socket.socket())
            self.addCleanup(self.client_stream.close)
            yield [self.client_stream.connect(("127.0.0.1", port)), event.wait()]
            self.io_loop.remove_handler(listener)
            listener.close()
    
        @gen_test
        def test_http10_no_content_length(self):
            # Regression test for a bug in which can_keep_alive would crash
            # for an HTTP/1.0 (not 1.1) response with no content-length.
            conn = HTTP1Connection(self.client_stream, True)
            self.server_stream.write(b"HTTP/1.0 200 Not Modified\r\n\r\nhello")
            self.server_stream.close()
    
            event = Event()
            test = self
            body = []
    
            class Delegate(HTTPMessageDelegate):
                def headers_received(self, start_line, headers):
                    test.code = start_line.code
    
                def data_received(self, data):
                    body.append(data)
    
                def finish(self):
                    event.set()
    
            yield conn.read_response(Delegate())
            yield event.wait()
            self.assertEqual(self.code, 200)
            self.assertEqual(b"".join(body), b"hello")
    tornado-6.1.0/tornado/test/httpclient_test.py000066400000000000000000001026671374705040500214210ustar00rootroot00000000000000import base64
    import binascii
    from contextlib import closing
    import copy
    import gzip
    import threading
    import datetime
    from io import BytesIO
    import subprocess
    import sys
    import time
    import typing  # noqa: F401
    import unicodedata
    import unittest
    
    from tornado.escape import utf8, native_str, to_unicode
    from tornado import gen
    from tornado.httpclient import (
        HTTPRequest,
        HTTPResponse,
        _RequestProxy,
        HTTPError,
        HTTPClient,
    )
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop
    from tornado.iostream import IOStream
    from tornado.log import gen_log, app_log
    from tornado import netutil
    from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
    from tornado.test.util import skipOnTravis
    from tornado.web import Application, RequestHandler, url
    from tornado.httputil import format_timestamp, HTTPHeaders
    
    
    class HelloWorldHandler(RequestHandler):
        def get(self):
            name = self.get_argument("name", "world")
            self.set_header("Content-Type", "text/plain")
            self.finish("Hello %s!" % name)
    
    
    class PostHandler(RequestHandler):
        def post(self):
            self.finish(
                "Post arg1: %s, arg2: %s"
                % (self.get_argument("arg1"), self.get_argument("arg2"))
            )
    
    
    class PutHandler(RequestHandler):
        def put(self):
            self.write("Put body: ")
            self.write(self.request.body)
    
    
    class RedirectHandler(RequestHandler):
        def prepare(self):
            self.write("redirects can have bodies too")
            self.redirect(
                self.get_argument("url"), status=int(self.get_argument("status", "302"))
            )
    
    
    class RedirectWithoutLocationHandler(RequestHandler):
        def prepare(self):
            # For testing error handling of a redirect with no location header.
            self.set_status(301)
            self.finish()
    
    
    class ChunkHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            self.write("asdf")
            self.flush()
            # Wait a bit to ensure the chunks are sent and received separately.
            yield gen.sleep(0.01)
            self.write("qwer")
    
    
    class AuthHandler(RequestHandler):
        def get(self):
            self.finish(self.request.headers["Authorization"])
    
    
    class CountdownHandler(RequestHandler):
        def get(self, count):
            count = int(count)
            if count > 0:
                self.redirect(self.reverse_url("countdown", count - 1))
            else:
                self.write("Zero")
    
    
    class EchoPostHandler(RequestHandler):
        def post(self):
            self.write(self.request.body)
    
    
    class UserAgentHandler(RequestHandler):
        def get(self):
            self.write(self.request.headers.get("User-Agent", "User agent not set"))
    
    
    class ContentLength304Handler(RequestHandler):
        def get(self):
            self.set_status(304)
            self.set_header("Content-Length", 42)
    
        def _clear_representation_headers(self):
            # Tornado strips content-length from 304 responses, but here we
            # want to simulate servers that include the headers anyway.
            pass
    
    
    class PatchHandler(RequestHandler):
        def patch(self):
            "Return the request payload - so we can check it is being kept"
            self.write(self.request.body)
    
    
    class AllMethodsHandler(RequestHandler):
        SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ("OTHER",)  # type: ignore
    
        def method(self):
            assert self.request.method is not None
            self.write(self.request.method)
    
        get = head = post = put = delete = options = patch = other = method  # type: ignore
    
    
    class SetHeaderHandler(RequestHandler):
        def get(self):
            # Use get_arguments for keys to get strings, but
            # request.arguments for values to get bytes.
            for k, v in zip(self.get_arguments("k"), self.request.arguments["v"]):
                self.set_header(k, v)
    
    
    class InvalidGzipHandler(RequestHandler):
        def get(self):
            # set Content-Encoding manually to avoid automatic gzip encoding
            self.set_header("Content-Type", "text/plain")
            self.set_header("Content-Encoding", "gzip")
            # Triggering the potential bug seems to depend on input length.
            # This length is taken from the bad-response example reported in
            # https://github.com/tornadoweb/tornado/pull/2875 (uncompressed).
            body = "".join("Hello World {}\n".format(i) for i in range(9000))[:149051]
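            # The stray NUL byte appended below, after an otherwise valid gzip
            # stream, is what makes the response body invalid.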
            body = gzip.compress(body.encode(), compresslevel=6) + b"\00"
            self.write(body)
    
    
    # These tests end up getting run redundantly: once here with the default
    # HTTPClient implementation, and then again in each implementation's own
    # test suite.
    
    
    class HTTPClientCommonTestCase(AsyncHTTPTestCase):
        def get_app(self):
            return Application(
                [
                    url("/hello", HelloWorldHandler),
                    url("/post", PostHandler),
                    url("/put", PutHandler),
                    url("/redirect", RedirectHandler),
                    url("/redirect_without_location", RedirectWithoutLocationHandler),
                    url("/chunk", ChunkHandler),
                    url("/auth", AuthHandler),
                    url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
                    url("/echopost", EchoPostHandler),
                    url("/user_agent", UserAgentHandler),
                    url("/304_with_content_length", ContentLength304Handler),
                    url("/all_methods", AllMethodsHandler),
                    url("/patch", PatchHandler),
                    url("/set_header", SetHeaderHandler),
                    url("/invalid_gzip", InvalidGzipHandler),
                ],
                gzip=True,
            )
    
        def test_patch_receives_payload(self):
            body = b"some patch data"
            response = self.fetch("/patch", method="PATCH", body=body)
            self.assertEqual(response.code, 200)
            self.assertEqual(response.body, body)
    
        @skipOnTravis
        def test_hello_world(self):
            response = self.fetch("/hello")
            self.assertEqual(response.code, 200)
            self.assertEqual(response.headers["Content-Type"], "text/plain")
            self.assertEqual(response.body, b"Hello world!")
            assert response.request_time is not None
            self.assertEqual(int(response.request_time), 0)
    
            response = self.fetch("/hello?name=Ben")
            self.assertEqual(response.body, b"Hello Ben!")
    
        def test_streaming_callback(self):
            # streaming_callback is also tested in test_chunked
            chunks = []  # type: typing.List[bytes]
            response = self.fetch("/hello", streaming_callback=chunks.append)
            # with streaming_callback, data goes to the callback and not response.body
            self.assertEqual(chunks, [b"Hello world!"])
            self.assertFalse(response.body)
    
        def test_post(self):
            response = self.fetch("/post", method="POST", body="arg1=foo&arg2=bar")
            self.assertEqual(response.code, 200)
            self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    
        def test_chunked(self):
            response = self.fetch("/chunk")
            self.assertEqual(response.body, b"asdfqwer")
    
            chunks = []  # type: typing.List[bytes]
            response = self.fetch("/chunk", streaming_callback=chunks.append)
            self.assertEqual(chunks, [b"asdf", b"qwer"])
            self.assertFalse(response.body)
    
        def test_chunked_close(self):
            # test case in which chunks spread read-callback processing
            # over several ioloop iterations, but the connection is already closed.
            sock, port = bind_unused_port()
            with closing(sock):
    
                @gen.coroutine
                def accept_callback(conn, address):
                    # fake an HTTP server using chunked encoding where the final chunks
                    # and connection close all happen at once
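                    # The chunked payload below decodes as: a chunk of size 1
                    # containing "1", a chunk of size 1 containing "2", then the
                    # terminating zero-length chunk -- i.e. a body of b"12".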
                    stream = IOStream(conn)
                    request_data = yield stream.read_until(b"\r\n\r\n")
                    if b"HTTP/1." not in request_data:
                        self.skipTest("requires HTTP/1.x")
                    yield stream.write(
                        b"""\
    HTTP/1.1 200 OK
    Transfer-Encoding: chunked
    
    1
    1
    1
    2
    0
    
    """.replace(
                            b"\n", b"\r\n"
                        )
                    )
                    stream.close()
    
                netutil.add_accept_handler(sock, accept_callback)  # type: ignore
                resp = self.fetch("http://127.0.0.1:%d/" % port)
                resp.rethrow()
                self.assertEqual(resp.body, b"12")
                self.io_loop.remove_handler(sock.fileno())
    
        def test_basic_auth(self):
            # This test data appears in section 2 of RFC 7617.
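            # The expected header value is simply base64 of b"user:password":
            #     base64.b64encode(b"Aladdin:open sesame") == b"QWxhZGRpbjpvcGVuIHNlc2FtZQ=="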
            self.assertEqual(
                self.fetch(
                    "/auth", auth_username="Aladdin", auth_password="open sesame"
                ).body,
                b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
            )
    
        def test_basic_auth_explicit_mode(self):
            self.assertEqual(
                self.fetch(
                    "/auth",
                    auth_username="Aladdin",
                    auth_password="open sesame",
                    auth_mode="basic",
                ).body,
                b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
            )
    
        def test_basic_auth_unicode(self):
            # This test data appears in section 2.1 of RFC 7617.
            self.assertEqual(
                self.fetch("/auth", auth_username="test", auth_password="123£").body,
                b"Basic dGVzdDoxMjPCow==",
            )
    
            # The standard mandates NFC. Give it a decomposed username
            # and ensure it is normalized to composed form.
            username = unicodedata.normalize("NFD", u"josé")
            self.assertEqual(
                self.fetch("/auth", auth_username=username, auth_password="səcrət").body,
                b"Basic am9zw6k6c8mZY3LJmXQ=",
            )
    
        def test_unsupported_auth_mode(self):
            # curl and simple clients handle errors a bit differently; the
            # important thing is that they don't fall back to basic auth
            # on an unknown mode.
            with ExpectLog(gen_log, "uncaught exception", required=False):
                with self.assertRaises((ValueError, HTTPError)):  # type: ignore
                    self.fetch(
                        "/auth",
                        auth_username="Aladdin",
                        auth_password="open sesame",
                        auth_mode="asdf",
                        raise_error=True,
                    )
    
        def test_follow_redirect(self):
            response = self.fetch("/countdown/2", follow_redirects=False)
            self.assertEqual(302, response.code)
            self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
    
            response = self.fetch("/countdown/2")
            self.assertEqual(200, response.code)
            self.assertTrue(response.effective_url.endswith("/countdown/0"))
            self.assertEqual(b"Zero", response.body)
    
        def test_redirect_without_location(self):
            response = self.fetch("/redirect_without_location", follow_redirects=True)
            # If there is no location header, the redirect response should
            # just be returned as-is. (This should arguably raise an
            # error, but libcurl doesn't treat this as an error, so we
            # don't either).
            self.assertEqual(301, response.code)
    
        def test_redirect_put_with_body(self):
            response = self.fetch(
                "/redirect?url=/put&status=307", method="PUT", body="hello"
            )
            self.assertEqual(response.body, b"Put body: hello")
    
        def test_redirect_put_without_body(self):
            # This "without body" edge case is similar to what happens with body_producer.
            response = self.fetch(
                "/redirect?url=/put&status=307",
                method="PUT",
                allow_nonstandard_methods=True,
            )
            self.assertEqual(response.body, b"Put body: ")
    
        def test_method_after_redirect(self):
            # Legacy redirect codes (301, 302) convert POST requests to GET.
            for status in [301, 302, 303]:
                url = "/redirect?url=/all_methods&status=%d" % status
                resp = self.fetch(url, method="POST", body=b"")
                self.assertEqual(b"GET", resp.body)
    
                # 301 and 302 leave other methods alone; for a 303 redirect,
                # the behavior depends on the client.
                for method in ["GET", "OPTIONS", "PUT", "DELETE"]:
                    resp = self.fetch(url, method=method, allow_nonstandard_methods=True)
                    if status in [301, 302]:
                        self.assertEqual(utf8(method), resp.body)
                    else:
                        self.assertIn(resp.body, [utf8(method), b"GET"])
    
                # HEAD is different so check it separately.
                resp = self.fetch(url, method="HEAD")
                self.assertEqual(200, resp.code)
                self.assertEqual(b"", resp.body)
    
            # Newer redirects always preserve the original method.
            for status in [307, 308]:
                url = "/redirect?url=/all_methods&status=%d" % status
                for method in ["GET", "OPTIONS", "POST", "PUT", "DELETE"]:
                    resp = self.fetch(url, method=method, allow_nonstandard_methods=True)
                    self.assertEqual(method, to_unicode(resp.body))
                resp = self.fetch(url, method="HEAD")
                self.assertEqual(200, resp.code)
                self.assertEqual(b"", resp.body)
    
        def test_credentials_in_url(self):
            url = self.get_url("/auth").replace("http://", "http://me:secret@")
            response = self.fetch(url)
            self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"), response.body)
    
        def test_body_encoding(self):
            unicode_body = u"\xe9"
            byte_body = binascii.a2b_hex(b"e9")
    
            # unicode string in body gets converted to utf8
            response = self.fetch(
                "/echopost",
                method="POST",
                body=unicode_body,
                headers={"Content-Type": "application/blah"},
            )
            self.assertEqual(response.headers["Content-Length"], "2")
            self.assertEqual(response.body, utf8(unicode_body))
    
            # byte strings pass through directly
            response = self.fetch(
                "/echopost",
                method="POST",
                body=byte_body,
                headers={"Content-Type": "application/blah"},
            )
            self.assertEqual(response.headers["Content-Length"], "1")
            self.assertEqual(response.body, byte_body)
    
            # Mixing unicode in headers and byte string bodies shouldn't
            # break anything
            response = self.fetch(
                "/echopost",
                method="POST",
                body=byte_body,
                headers={"Content-Type": "application/blah"},
                user_agent=u"foo",
            )
            self.assertEqual(response.headers["Content-Length"], "1")
            self.assertEqual(response.body, byte_body)
    
        def test_types(self):
            response = self.fetch("/hello")
            self.assertEqual(type(response.body), bytes)
            self.assertEqual(type(response.headers["Content-Type"]), str)
            self.assertEqual(type(response.code), int)
            self.assertEqual(type(response.effective_url), str)
    
        def test_gzip(self):
            # All the tests in this file should be using gzip, but this test
            # ensures that it is in fact getting compressed, and also tests
            # the httpclient's decompress=False option.
            # Setting Accept-Encoding manually bypasses the client's
            # decompression so we can see the raw data.
            response = self.fetch(
                "/chunk", decompress_response=False, headers={"Accept-Encoding": "gzip"}
            )
            self.assertEqual(response.headers["Content-Encoding"], "gzip")
            self.assertNotEqual(response.body, b"asdfqwer")
            # Our test data gets bigger when gzipped.  Oops.  :)
            # Chunked encoding bypasses the MIN_LENGTH check.
            self.assertEqual(len(response.body), 34)
            f = gzip.GzipFile(mode="r", fileobj=response.buffer)
            self.assertEqual(f.read(), b"asdfqwer")
    
        def test_invalid_gzip(self):
            # test if client hangs on tricky invalid gzip
            # curl/simple httpclient have different behavior (exception, logging)
            with ExpectLog(
                app_log, "(Uncaught exception|Exception in callback)", required=False
            ):
                try:
                    response = self.fetch("/invalid_gzip")
                    self.assertEqual(response.code, 200)
                    self.assertEqual(response.body[:14], b"Hello World 0\n")
                except HTTPError:
                    pass  # acceptable
    
        def test_header_callback(self):
            first_line = []
            headers = {}
            chunks = []
    
            def header_callback(header_line):
                if header_line.startswith("HTTP/1.1 101"):
                    # Upgrading to HTTP/2
                    pass
                elif header_line.startswith("HTTP/"):
                    first_line.append(header_line)
                elif header_line != "\r\n":
                    k, v = header_line.split(":", 1)
                    headers[k.lower()] = v.strip()
    
            def streaming_callback(chunk):
                # All header callbacks are run before any streaming callbacks,
                # so the header data is available to process the data as it
                # comes in.
                self.assertEqual(headers["content-type"], "text/html; charset=UTF-8")
                chunks.append(chunk)
    
            self.fetch(
                "/chunk",
                header_callback=header_callback,
                streaming_callback=streaming_callback,
            )
            self.assertEqual(len(first_line), 1, first_line)
            self.assertRegex(first_line[0], "HTTP/[0-9]\\.[0-9] 200.*\r\n")
            self.assertEqual(chunks, [b"asdf", b"qwer"])
    
        @gen_test
        def test_configure_defaults(self):
            defaults = dict(user_agent="TestDefaultUserAgent", allow_ipv6=False)
            # Construct a new instance of the configured client class
            client = self.http_client.__class__(force_instance=True, defaults=defaults)
            try:
                response = yield client.fetch(self.get_url("/user_agent"))
                self.assertEqual(response.body, b"TestDefaultUserAgent")
            finally:
                client.close()
    
        def test_header_types(self):
            # Header values may be passed as character or utf8 byte strings,
            # in a plain dictionary or an HTTPHeaders object.
            # Keys must always be the native str type.
            # All combinations should have the same results on the wire.
            for value in [u"MyUserAgent", b"MyUserAgent"]:
                for container in [dict, HTTPHeaders]:
                    headers = container()
                    headers["User-Agent"] = value
                    resp = self.fetch("/user_agent", headers=headers)
                    self.assertEqual(
                        resp.body,
                        b"MyUserAgent",
                        "response=%r, value=%r, container=%r"
                        % (resp.body, value, container),
                    )
    
        def test_multi_line_headers(self):
            # Multi-line http headers are rare but rfc-allowed
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
            sock, port = bind_unused_port()
            with closing(sock):
    
                @gen.coroutine
                def accept_callback(conn, address):
                    stream = IOStream(conn)
                    request_data = yield stream.read_until(b"\r\n\r\n")
                    if b"HTTP/1." not in request_data:
                        self.skipTest("requires HTTP/1.x")
                    yield stream.write(
                        b"""\
    HTTP/1.1 200 OK
    X-XSS-Protection: 1;
    \tmode=block
    
    """.replace(
                            b"\n", b"\r\n"
                        )
                    )
                    stream.close()
    
                netutil.add_accept_handler(sock, accept_callback)  # type: ignore
                try:
                    resp = self.fetch("http://127.0.0.1:%d/" % port)
                    resp.rethrow()
                    self.assertEqual(resp.headers["X-XSS-Protection"], "1; mode=block")
                finally:
                    self.io_loop.remove_handler(sock.fileno())
    
        def test_304_with_content_length(self):
            # According to the spec 304 responses SHOULD NOT include
            # Content-Length or other entity headers, but some servers do it
            # anyway.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
            response = self.fetch("/304_with_content_length")
            self.assertEqual(response.code, 304)
            self.assertEqual(response.headers["Content-Length"], "42")
    
        @gen_test
        def test_future_interface(self):
            response = yield self.http_client.fetch(self.get_url("/hello"))
            self.assertEqual(response.body, b"Hello world!")
    
        @gen_test
        def test_future_http_error(self):
            with self.assertRaises(HTTPError) as context:
                yield self.http_client.fetch(self.get_url("/notfound"))
            assert context.exception is not None
            assert context.exception.response is not None
            self.assertEqual(context.exception.code, 404)
            self.assertEqual(context.exception.response.code, 404)
    
        @gen_test
        def test_future_http_error_no_raise(self):
            response = yield self.http_client.fetch(
                self.get_url("/notfound"), raise_error=False
            )
            self.assertEqual(response.code, 404)
    
        @gen_test
        def test_reuse_request_from_response(self):
            # The response.request attribute should be an HTTPRequest, not
            # a _RequestProxy.
            # This test uses self.http_client.fetch because self.fetch calls
            # self.get_url on the input unconditionally.
            url = self.get_url("/hello")
            response = yield self.http_client.fetch(url)
            self.assertEqual(response.request.url, url)
            self.assertTrue(isinstance(response.request, HTTPRequest))
            response2 = yield self.http_client.fetch(response.request)
            self.assertEqual(response2.body, b"Hello world!")
    
        @gen_test
        def test_bind_source_ip(self):
            url = self.get_url("/hello")
            request = HTTPRequest(url, network_interface="127.0.0.1")
            response = yield self.http_client.fetch(request)
            self.assertEqual(response.code, 200)
    
            with self.assertRaises((ValueError, HTTPError)) as context:  # type: ignore
                request = HTTPRequest(url, network_interface="not-interface-or-ip")
                yield self.http_client.fetch(request)
            self.assertIn("not-interface-or-ip", str(context.exception))
    
        def test_all_methods(self):
            for method in ["GET", "DELETE", "OPTIONS"]:
                response = self.fetch("/all_methods", method=method)
                self.assertEqual(response.body, utf8(method))
            for method in ["POST", "PUT", "PATCH"]:
                response = self.fetch("/all_methods", method=method, body=b"")
                self.assertEqual(response.body, utf8(method))
            response = self.fetch("/all_methods", method="HEAD")
            self.assertEqual(response.body, b"")
            response = self.fetch(
                "/all_methods", method="OTHER", allow_nonstandard_methods=True
            )
            self.assertEqual(response.body, b"OTHER")
    
        def test_body_sanity_checks(self):
            # These methods require a body.
            for method in ("POST", "PUT", "PATCH"):
                with self.assertRaises(ValueError) as context:
                    self.fetch("/all_methods", method=method, raise_error=True)
                self.assertIn("must not be None", str(context.exception))
    
                resp = self.fetch(
                    "/all_methods", method=method, allow_nonstandard_methods=True
                )
                self.assertEqual(resp.code, 200)
    
            # These methods don't allow a body.
            for method in ("GET", "DELETE", "OPTIONS"):
                with self.assertRaises(ValueError) as context:
                    self.fetch(
                        "/all_methods", method=method, body=b"asdf", raise_error=True
                    )
                self.assertIn("must be None", str(context.exception))
    
                # In most cases this can be overridden, but curl_httpclient
                # does not allow body with a GET at all.
                if method != "GET":
                    resp = self.fetch(
                        "/all_methods",
                        method=method,
                        body=b"asdf",
                        allow_nonstandard_methods=True,
                        raise_error=True,
                    )
                    self.assertEqual(resp.code, 200)
    
        # This test causes odd failures with the combination of
        # curl_httpclient (at least with the version of libcurl available
        # on ubuntu 12.04), TwistedIOLoop, and epoll.  For POST (but not PUT),
        # curl decides the response came back too soon and closes the connection
        # to start again.  It does this *before* telling the socket callback to
        # unregister the FD.  Some IOLoop implementations have special kernel
        # integration to discover this immediately.  Tornado's IOLoops
        # ignore errors on remove_handler to accommodate this behavior, but
        # Twisted's reactor does not.  The removeReader call fails and so
        # do all future removeAll calls (which our tests do at cleanup).
        #
        # def test_post_307(self):
        #    response = self.fetch("/redirect?status=307&url=/post",
        #                          method="POST", body=b"arg1=foo&arg2=bar")
        #    self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    
        def test_put_307(self):
            response = self.fetch(
                "/redirect?status=307&url=/put", method="PUT", body=b"hello"
            )
            response.rethrow()
            self.assertEqual(response.body, b"Put body: hello")
    
        def test_non_ascii_header(self):
            # Non-ascii headers are sent as latin1.
            response = self.fetch("/set_header?k=foo&v=%E9")
            response.rethrow()
            self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
    
        def test_response_times(self):
            # A few simple sanity checks of the response time fields to
            # make sure they're using the right basis (between the
            # wall-time and monotonic clocks).
            start_time = time.time()
            response = self.fetch("/hello")
            response.rethrow()
            self.assertGreaterEqual(response.request_time, 0)
            self.assertLess(response.request_time, 1.0)
            # A very crude check to make sure that start_time is based on
            # wall time and not the monotonic clock.
            assert response.start_time is not None
            self.assertLess(abs(response.start_time - start_time), 1.0)
    
            for k, v in response.time_info.items():
                self.assertTrue(0 <= v < 1.0, "time_info[%s] out of bounds: %s" % (k, v))
    
        def test_zero_timeout(self):
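            # A timeout of zero is treated as "no timeout" rather than
            # "fail immediately", so all of these fetches should succeed.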
            response = self.fetch("/hello", connect_timeout=0)
            self.assertEqual(response.code, 200)
    
            response = self.fetch("/hello", request_timeout=0)
            self.assertEqual(response.code, 200)
    
            response = self.fetch("/hello", connect_timeout=0, request_timeout=0)
            self.assertEqual(response.code, 200)
    
        @gen_test
        def test_error_after_cancel(self):
            fut = self.http_client.fetch(self.get_url("/404"))
            self.assertTrue(fut.cancel())
            with ExpectLog(app_log, "Exception after Future was cancelled") as el:
                # We can't wait on the cancelled Future any more, so just
                # let the IOLoop run until the exception gets logged (or
                # not, in which case we exit the loop and ExpectLog will
                # raise).
                for i in range(100):
                    yield gen.sleep(0.01)
                    if el.logged_stack:
                        break
    
    
    class RequestProxyTest(unittest.TestCase):
        def test_request_set(self):
            proxy = _RequestProxy(
                HTTPRequest("http://example.com/", user_agent="foo"), dict()
            )
            self.assertEqual(proxy.user_agent, "foo")
    
        def test_default_set(self):
            proxy = _RequestProxy(
                HTTPRequest("http://example.com/"), dict(network_interface="foo")
            )
            self.assertEqual(proxy.network_interface, "foo")
    
        def test_both_set(self):
            proxy = _RequestProxy(
                HTTPRequest("http://example.com/", proxy_host="foo"), dict(proxy_host="bar")
            )
            self.assertEqual(proxy.proxy_host, "foo")
    
        def test_neither_set(self):
            proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
            self.assertIs(proxy.auth_username, None)
    
        def test_bad_attribute(self):
            proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
            with self.assertRaises(AttributeError):
                proxy.foo
    
        def test_defaults_none(self):
            proxy = _RequestProxy(HTTPRequest("http://example.com/"), None)
            self.assertIs(proxy.auth_username, None)
    
    
    class HTTPResponseTestCase(unittest.TestCase):
        def test_str(self):
            response = HTTPResponse(  # type: ignore
                HTTPRequest("http://example.com"), 200, buffer=BytesIO()
            )
            s = str(response)
            self.assertTrue(s.startswith("HTTPResponse("))
            self.assertIn("code=200", s)
    
    
    class SyncHTTPClientTest(unittest.TestCase):
        def setUp(self):
            self.server_ioloop = IOLoop()
            event = threading.Event()
    
            @gen.coroutine
            def init_server():
                sock, self.port = bind_unused_port()
                app = Application([("/", HelloWorldHandler)])
                self.server = HTTPServer(app)
                self.server.add_socket(sock)
                event.set()
    
            def start():
                self.server_ioloop.run_sync(init_server)
                self.server_ioloop.start()
    
            self.server_thread = threading.Thread(target=start)
            self.server_thread.start()
            event.wait()
    
            self.http_client = HTTPClient()
    
        def tearDown(self):
            def stop_server():
                self.server.stop()
                # Delay the shutdown of the IOLoop by several iterations because
                # the server may still have some cleanup work left when
                # the client finishes with the response (this is noticeable
                # with http/2, which leaves a Future with an unexamined
                # StreamClosedError on the loop).
    
                @gen.coroutine
                def slow_stop():
                    yield self.server.close_all_connections()
                    # The number of iterations is difficult to predict. Typically,
                    # one is sufficient, although sometimes it needs more.
                    for i in range(5):
                        yield
                    self.server_ioloop.stop()
    
                self.server_ioloop.add_callback(slow_stop)
    
            self.server_ioloop.add_callback(stop_server)
            self.server_thread.join()
            self.http_client.close()
            self.server_ioloop.close(all_fds=True)
    
        def get_url(self, path):
            return "http://127.0.0.1:%d%s" % (self.port, path)
    
        def test_sync_client(self):
            response = self.http_client.fetch(self.get_url("/"))
            self.assertEqual(b"Hello world!", response.body)
    
        def test_sync_client_error(self):
            # Synchronous HTTPClient raises errors directly; no need for
            # response.rethrow()
            with self.assertRaises(HTTPError) as assertion:
                self.http_client.fetch(self.get_url("/notfound"))
            self.assertEqual(assertion.exception.code, 404)
    
    
    class SyncHTTPClientSubprocessTest(unittest.TestCase):
        def test_destructor_log(self):
            # Regression test for
            # https://github.com/tornadoweb/tornado/issues/2539
            #
            # In the past, the following program would log an
            # "inconsistent AsyncHTTPClient cache" error from a destructor
            # when the process is shutting down. The shutdown process is
            # subtle and I don't fully understand it; the failure does not
            # manifest if that lambda isn't there or is a simpler object
            # like an int (nor does it manifest in the tornado test suite
            # as a whole, which is why we use this subprocess).
            proc = subprocess.run(
                [
                    sys.executable,
                    "-c",
                    "from tornado.httpclient import HTTPClient; f = lambda: None; c = HTTPClient()",
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True,
                timeout=5,
            )
            if proc.stdout:
                print("STDOUT:")
                print(to_unicode(proc.stdout))
                self.fail("subprocess produced unexpected output")
    
    
    class HTTPRequestTestCase(unittest.TestCase):
        def test_headers(self):
            request = HTTPRequest("http://example.com", headers={"foo": "bar"})
            self.assertEqual(request.headers, {"foo": "bar"})
    
        def test_headers_setter(self):
            request = HTTPRequest("http://example.com")
            request.headers = {"bar": "baz"}  # type: ignore
            self.assertEqual(request.headers, {"bar": "baz"})
    
        def test_null_headers_setter(self):
            request = HTTPRequest("http://example.com")
            request.headers = None  # type: ignore
            self.assertEqual(request.headers, {})
    
        def test_body(self):
            request = HTTPRequest("http://example.com", body="foo")
            self.assertEqual(request.body, utf8("foo"))
    
        def test_body_setter(self):
            request = HTTPRequest("http://example.com")
            request.body = "foo"  # type: ignore
            self.assertEqual(request.body, utf8("foo"))
    
        def test_if_modified_since(self):
            http_date = datetime.datetime.utcnow()
            request = HTTPRequest("http://example.com", if_modified_since=http_date)
            self.assertEqual(
                request.headers, {"If-Modified-Since": format_timestamp(http_date)}
            )
    
    
    class HTTPErrorTestCase(unittest.TestCase):
        def test_copy(self):
            e = HTTPError(403)
            e2 = copy.copy(e)
            self.assertIsNot(e, e2)
            self.assertEqual(e.code, e2.code)
    
        def test_plain_error(self):
            e = HTTPError(403)
            self.assertEqual(str(e), "HTTP 403: Forbidden")
            self.assertEqual(repr(e), "HTTP 403: Forbidden")
    
        def test_error_with_response(self):
            resp = HTTPResponse(HTTPRequest("http://example.com/"), 403)
            with self.assertRaises(HTTPError) as cm:
                resp.rethrow()
            e = cm.exception
            self.assertEqual(str(e), "HTTP 403: Forbidden")
            self.assertEqual(repr(e), "HTTP 403: Forbidden")
    tornado-6.1.0/tornado/test/httpserver_test.py000066400000000000000000001326341374705040500214460ustar00rootroot00000000000000from tornado import gen, netutil
    from tornado.escape import (
        json_decode,
        json_encode,
        utf8,
        _unicode,
        recursive_unicode,
        native_str,
    )
    from tornado.http1connection import HTTP1Connection
    from tornado.httpclient import HTTPError
    from tornado.httpserver import HTTPServer
    from tornado.httputil import (
        HTTPHeaders,
        HTTPMessageDelegate,
        HTTPServerConnectionDelegate,
        ResponseStartLine,
    )
    from tornado.iostream import IOStream
    from tornado.locks import Event
    from tornado.log import gen_log
    from tornado.netutil import ssl_options_to_context
    from tornado.simple_httpclient import SimpleAsyncHTTPClient
    from tornado.testing import (
        AsyncHTTPTestCase,
        AsyncHTTPSTestCase,
        AsyncTestCase,
        ExpectLog,
        gen_test,
    )
    from tornado.test.util import skipOnTravis
    from tornado.web import Application, RequestHandler, stream_request_body
    
    from contextlib import closing
    import datetime
    import gzip
    import logging
    import os
    import shutil
    import socket
    import ssl
    import sys
    import tempfile
    import unittest
    import urllib.parse
    from io import BytesIO
    
    import typing
    
    if typing.TYPE_CHECKING:
        from typing import Dict, List  # noqa: F401
    
    
    async def read_stream_body(stream):
        """Reads an HTTP response from `stream` and returns a tuple of its
        start_line, headers and body."""
        chunks = []
    
        class Delegate(HTTPMessageDelegate):
            def headers_received(self, start_line, headers):
                self.headers = headers
                self.start_line = start_line
    
            def data_received(self, chunk):
                chunks.append(chunk)
    
            def finish(self):
                conn.detach()  # type: ignore
    
        conn = HTTP1Connection(stream, True)
        delegate = Delegate()
        await conn.read_response(delegate)
        return delegate.start_line, delegate.headers, b"".join(chunks)
    
    
    class HandlerBaseTestCase(AsyncHTTPTestCase):
        Handler = None
    
        def get_app(self):
            return Application([("/", self.__class__.Handler)])
    
        def fetch_json(self, *args, **kwargs):
            response = self.fetch(*args, **kwargs)
            response.rethrow()
            return json_decode(response.body)
    
    
    class HelloWorldRequestHandler(RequestHandler):
        def initialize(self, protocol="http"):
            self.expected_protocol = protocol
    
        def get(self):
            if self.request.protocol != self.expected_protocol:
                raise Exception("unexpected protocol")
            self.finish("Hello world")
    
        def post(self):
            self.finish("Got %d bytes in POST" % len(self.request.body))
    
    
    # In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
    # ClientHello messages, which are rejected by SSLv3 and TLSv1
    # servers.  Note that while the OPENSSL_VERSION_INFO was formally
    # introduced in python3.2, it was present but undocumented in
    # python 2.7
    skipIfOldSSL = unittest.skipIf(
        getattr(ssl, "OPENSSL_VERSION_INFO", (0, 0)) < (1, 0),
        "old version of ssl module and/or openssl",
    )
    
    
    class BaseSSLTest(AsyncHTTPSTestCase):
        def get_app(self):
            return Application([("/", HelloWorldRequestHandler, dict(protocol="https"))])
    
    
    class SSLTestMixin(object):
        def get_ssl_options(self):
            return dict(
                ssl_version=self.get_ssl_version(),
                **AsyncHTTPSTestCase.default_ssl_options()
            )
    
        def get_ssl_version(self):
            raise NotImplementedError()
    
        def test_ssl(self: typing.Any):
            response = self.fetch("/")
            self.assertEqual(response.body, b"Hello world")
    
        def test_large_post(self: typing.Any):
            response = self.fetch("/", method="POST", body="A" * 5000)
            self.assertEqual(response.body, b"Got 5000 bytes in POST")
    
        def test_non_ssl_request(self: typing.Any):
            # Make sure the server closes the connection when it gets a non-ssl
            # connection, rather than waiting for a timeout or otherwise
            # misbehaving.
            with ExpectLog(gen_log, "(SSL Error|uncaught exception)"):
                with ExpectLog(gen_log, "Uncaught exception", required=False):
                    with self.assertRaises((IOError, HTTPError)):  # type: ignore
                        self.fetch(
                            self.get_url("/").replace("https:", "http:"),
                            request_timeout=3600,
                            connect_timeout=3600,
                            raise_error=True,
                        )
    
        def test_error_logging(self: typing.Any):
            # No stack traces are logged for SSL errors.
            with ExpectLog(gen_log, "SSL Error") as expect_log:
                with self.assertRaises((IOError, HTTPError)):  # type: ignore
                    self.fetch(
                        self.get_url("/").replace("https:", "http:"), raise_error=True
                    )
            self.assertFalse(expect_log.logged_stack)
    
    
    # Python's SSL implementation differs significantly between versions.
    # For example, SSLv3 and TLSv1 throw an exception if you try to read
    # from the socket before the handshake is complete, but the default
    # of SSLv23 allows it.
    
    
    class SSLv23Test(BaseSSLTest, SSLTestMixin):
        def get_ssl_version(self):
            return ssl.PROTOCOL_SSLv23
    
    
    @skipIfOldSSL
    class SSLv3Test(BaseSSLTest, SSLTestMixin):
        def get_ssl_version(self):
            return ssl.PROTOCOL_SSLv3
    
    
    @skipIfOldSSL
    class TLSv1Test(BaseSSLTest, SSLTestMixin):
        def get_ssl_version(self):
            return ssl.PROTOCOL_TLSv1
    
    
    class SSLContextTest(BaseSSLTest, SSLTestMixin):
        def get_ssl_options(self):
            context = ssl_options_to_context(AsyncHTTPSTestCase.get_ssl_options(self))
            assert isinstance(context, ssl.SSLContext)
            return context
    
    
    class BadSSLOptionsTest(unittest.TestCase):
        def test_missing_arguments(self):
            application = Application()
            self.assertRaises(
                KeyError,
                HTTPServer,
                application,
                ssl_options={"keyfile": "/__missing__.crt"},
            )
    
        def test_missing_key(self):
            """A missing SSL key should cause an immediate exception."""
    
            application = Application()
            module_dir = os.path.dirname(__file__)
            existing_certificate = os.path.join(module_dir, "test.crt")
            existing_key = os.path.join(module_dir, "test.key")
    
            self.assertRaises(
                (ValueError, IOError),
                HTTPServer,
                application,
                ssl_options={"certfile": "/__missing__.crt"},
            )
            self.assertRaises(
                (ValueError, IOError),
                HTTPServer,
                application,
                ssl_options={
                    "certfile": existing_certificate,
                    "keyfile": "/__missing__.key",
                },
            )
    
            # This actually works because both files exist
            HTTPServer(
                application,
                ssl_options={"certfile": existing_certificate, "keyfile": existing_key},
            )
    
    
    class MultipartTestHandler(RequestHandler):
        def post(self):
            self.finish(
                {
                    "header": self.request.headers["X-Header-Encoding-Test"],
                    "argument": self.get_argument("argument"),
                    "filename": self.request.files["files"][0].filename,
                    "filebody": _unicode(self.request.files["files"][0]["body"]),
                }
            )
    
    
    # This test is also called from wsgi_test
    class HTTPConnectionTest(AsyncHTTPTestCase):
        def get_handlers(self):
            return [
                ("/multipart", MultipartTestHandler),
                ("/hello", HelloWorldRequestHandler),
            ]
    
        def get_app(self):
            return Application(self.get_handlers())
    
        def raw_fetch(self, headers, body, newline=b"\r\n"):
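            # Writes the given header lines plus a computed Content-Length,
            # then the body, over a raw socket; returns the response body only.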
            with closing(IOStream(socket.socket())) as stream:
                self.io_loop.run_sync(
                    lambda: stream.connect(("127.0.0.1", self.get_http_port()))
                )
                stream.write(
                    newline.join(headers + [utf8("Content-Length: %d" % len(body))])
                    + newline
                    + newline
                    + body
                )
                start_line, headers, body = self.io_loop.run_sync(
                    lambda: read_stream_body(stream)
                )
                return body
    
        def test_multipart_form(self):
            # Encodings here are tricky:  Headers are latin1, bodies can be
            # anything (we use utf8 by default).
            response = self.raw_fetch(
                [
                    b"POST /multipart HTTP/1.0",
                    b"Content-Type: multipart/form-data; boundary=1234567890",
                    b"X-Header-encoding-test: \xe9",
                ],
                b"\r\n".join(
                    [
                        b"Content-Disposition: form-data; name=argument",
                        b"",
                        u"\u00e1".encode("utf-8"),
                        b"--1234567890",
                        u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode(
                            "utf8"
                        ),
                        b"",
                        u"\u00fa".encode("utf-8"),
                        b"--1234567890--",
                        b"",
                    ]
                ),
            )
            data = json_decode(response)
            self.assertEqual(u"\u00e9", data["header"])
            self.assertEqual(u"\u00e1", data["argument"])
            self.assertEqual(u"\u00f3", data["filename"])
            self.assertEqual(u"\u00fa", data["filebody"])
    
        def test_newlines(self):
            # We support both CRLF and bare LF as line separators.
            for newline in (b"\r\n", b"\n"):
                response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"", newline=newline)
                self.assertEqual(response, b"Hello world")
    
        @gen_test
        def test_100_continue(self):
            # Run through a 100-continue interaction by hand:
            # When given Expect: 100-continue, we get a 100 response after the
            # headers, and then the real response after the body.
            stream = IOStream(socket.socket())
            yield stream.connect(("127.0.0.1", self.get_http_port()))
            yield stream.write(
                b"\r\n".join(
                    [
                        b"POST /hello HTTP/1.1",
                        b"Content-Length: 1024",
                        b"Expect: 100-continue",
                        b"Connection: close",
                        b"\r\n",
                    ]
                )
            )
            data = yield stream.read_until(b"\r\n\r\n")
            self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
            stream.write(b"a" * 1024)
            first_line = yield stream.read_until(b"\r\n")
            self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
            header_data = yield stream.read_until(b"\r\n\r\n")
            headers = HTTPHeaders.parse(native_str(header_data.decode("latin1")))
            body = yield stream.read_bytes(int(headers["Content-Length"]))
            self.assertEqual(body, b"Got 1024 bytes in POST")
            stream.close()
    
    
    class EchoHandler(RequestHandler):
        def get(self):
            self.write(recursive_unicode(self.request.arguments))
    
        def post(self):
            self.write(recursive_unicode(self.request.arguments))
    
    
    class TypeCheckHandler(RequestHandler):
        def prepare(self):
            self.errors = {}  # type: Dict[str, str]
            fields = [
                ("method", str),
                ("uri", str),
                ("version", str),
                ("remote_ip", str),
                ("protocol", str),
                ("host", str),
                ("path", str),
                ("query", str),
            ]
            for field, expected_type in fields:
                self.check_type(field, getattr(self.request, field), expected_type)
    
            self.check_type("header_key", list(self.request.headers.keys())[0], str)
            self.check_type("header_value", list(self.request.headers.values())[0], str)
    
            self.check_type("cookie_key", list(self.request.cookies.keys())[0], str)
            self.check_type(
                "cookie_value", list(self.request.cookies.values())[0].value, str
            )
            # secure cookies
    
            self.check_type("arg_key", list(self.request.arguments.keys())[0], str)
            self.check_type("arg_value", list(self.request.arguments.values())[0][0], bytes)
    
        def post(self):
            self.check_type("body", self.request.body, bytes)
            self.write(self.errors)
    
        def get(self):
            self.write(self.errors)
    
        def check_type(self, name, obj, expected_type):
            actual_type = type(obj)
            if expected_type != actual_type:
                self.errors[name] = "expected %s, got %s" % (expected_type, actual_type)
    
    
    class PostEchoHandler(RequestHandler):
        def post(self, *path_args):
            self.write(dict(echo=self.get_argument("data")))
    
    
    class PostEchoGBKHandler(PostEchoHandler):
        def decode_argument(self, value, name=None):
            try:
                return value.decode("gbk")
            except Exception:
                raise HTTPError(400, "invalid gbk bytes: %r" % value)
    
    
    class HTTPServerTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application(
                [
                    ("/echo", EchoHandler),
                    ("/typecheck", TypeCheckHandler),
                    ("//doubleslash", EchoHandler),
                    ("/post_utf8", PostEchoHandler),
                    ("/post_gbk", PostEchoGBKHandler),
                ]
            )
    
        def test_query_string_encoding(self):
            response = self.fetch("/echo?foo=%C3%A9")
            data = json_decode(response.body)
            self.assertEqual(data, {u"foo": [u"\u00e9"]})
    
        def test_empty_query_string(self):
            response = self.fetch("/echo?foo=&foo=")
            data = json_decode(response.body)
            self.assertEqual(data, {u"foo": [u"", u""]})
    
        def test_empty_post_parameters(self):
            response = self.fetch("/echo", method="POST", body="foo=&bar=")
            data = json_decode(response.body)
            self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})
    
        def test_types(self):
            headers = {"Cookie": "foo=bar"}
            response = self.fetch("/typecheck?foo=bar", headers=headers)
            data = json_decode(response.body)
            self.assertEqual(data, {})
    
            response = self.fetch(
                "/typecheck", method="POST", body="foo=bar", headers=headers
            )
            data = json_decode(response.body)
            self.assertEqual(data, {})
    
        def test_double_slash(self):
            # urlparse.urlsplit (which tornado.httpserver used to use
            # incorrectly) would parse paths beginning with "//" as
            # protocol-relative urls.
            response = self.fetch("//doubleslash")
            self.assertEqual(200, response.code)
            self.assertEqual(json_decode(response.body), {})
    
        def test_post_encodings(self):
            headers = {"Content-Type": "application/x-www-form-urlencoded"}
            uni_text = "chinese: \u5f20\u4e09"
            for enc in ("utf8", "gbk"):
                for quote in (True, False):
                    with self.subTest(enc=enc, quote=quote):
                        bin_text = uni_text.encode(enc)
                        if quote:
                            bin_text = urllib.parse.quote(bin_text).encode("ascii")
                        response = self.fetch(
                            "/post_" + enc,
                            method="POST",
                            headers=headers,
                            body=(b"data=" + bin_text),
                        )
                        self.assertEqual(json_decode(response.body), {"echo": uni_text})
    
    
    class HTTPServerRawTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application([("/echo", EchoHandler)])
    
        def setUp(self):
            super().setUp()
            self.stream = IOStream(socket.socket())
            self.io_loop.run_sync(
                lambda: self.stream.connect(("127.0.0.1", self.get_http_port()))
            )
    
        def tearDown(self):
            self.stream.close()
            super().tearDown()
    
        def test_empty_request(self):
            self.stream.close()
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
            self.wait()
    
        def test_malformed_first_line_response(self):
            with ExpectLog(gen_log, ".*Malformed HTTP request line", level=logging.INFO):
                self.stream.write(b"asdf\r\n\r\n")
                start_line, headers, response = self.io_loop.run_sync(
                    lambda: read_stream_body(self.stream)
                )
                self.assertEqual("HTTP/1.1", start_line.version)
                self.assertEqual(400, start_line.code)
                self.assertEqual("Bad Request", start_line.reason)
    
        def test_malformed_first_line_log(self):
            with ExpectLog(gen_log, ".*Malformed HTTP request line", level=logging.INFO):
                self.stream.write(b"asdf\r\n\r\n")
                # TODO: need an async version of ExpectLog so we don't need
                # hard-coded timeouts here.
                self.io_loop.add_timeout(datetime.timedelta(seconds=0.05), self.stop)
                self.wait()
    
        def test_malformed_headers(self):
            with ExpectLog(
                gen_log,
                ".*Malformed HTTP message.*no colon in header line",
                level=logging.INFO,
            ):
                self.stream.write(b"GET / HTTP/1.0\r\nasdf\r\n\r\n")
                self.io_loop.add_timeout(datetime.timedelta(seconds=0.05), self.stop)
                self.wait()
    
        def test_chunked_request_body(self):
            # Chunked requests are not widely supported and we don't have a way
            # to generate them in AsyncHTTPClient, but HTTPServer will read them.
            self.stream.write(
                b"""\
    POST /echo HTTP/1.1
    Transfer-Encoding: chunked
    Content-Type: application/x-www-form-urlencoded
    
    4
    foo=
    3
    bar
    0
    
    """.replace(
                    b"\n", b"\r\n"
                )
            )
            start_line, headers, response = self.io_loop.run_sync(
                lambda: read_stream_body(self.stream)
            )
            self.assertEqual(json_decode(response), {u"foo": [u"bar"]})
    
        def test_chunked_request_uppercase(self):
            # As per RFC 2616 section 3.6, "Transfer-Encoding" header's value is
            # case-insensitive.
            self.stream.write(
                b"""\
    POST /echo HTTP/1.1
    Transfer-Encoding: Chunked
    Content-Type: application/x-www-form-urlencoded
    
    4
    foo=
    3
    bar
    0
    
    """.replace(
                    b"\n", b"\r\n"
                )
            )
            start_line, headers, response = self.io_loop.run_sync(
                lambda: read_stream_body(self.stream)
            )
            self.assertEqual(json_decode(response), {u"foo": [u"bar"]})
    
        @gen_test
        def test_invalid_content_length(self):
            with ExpectLog(
                gen_log, ".*Only integer Content-Length is allowed", level=logging.INFO
            ):
                self.stream.write(
                    b"""\
    POST /echo HTTP/1.1
    Content-Length: foo
    
    bar
    
    """.replace(
                        b"\n", b"\r\n"
                    )
                )
                yield self.stream.read_until_close()
    
    
    class XHeaderTest(HandlerBaseTestCase):
        class Handler(RequestHandler):
            def get(self):
                self.set_header("request-version", self.request.version)
                self.write(
                    dict(
                        remote_ip=self.request.remote_ip,
                        remote_protocol=self.request.protocol,
                    )
                )
    
        def get_httpserver_options(self):
            return dict(xheaders=True, trusted_downstream=["5.5.5.5"])
    
        def test_ip_headers(self):
            self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
    
            valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv4)["remote_ip"], "4.4.4.4"
            )
    
            valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"], "4.4.4.4"
            )
    
            valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
                "2620:0:1cfe:face:b00c::3",
            )
    
            valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
                "2620:0:1cfe:face:b00c::3",
            )
    
            invalid_chars = {"X-Real-IP": "4.4.4.4
    
    '
                for p in paths
            )
    
        def render_embed_js(self, js_embed: Iterable[bytes]) -> bytes:
            """Default method used to render the final embedded js for the
            rendered webpage.
    
            Override this method in a sub-classed controller to change the output.
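
            For example, a subclass could tag the block with a CSP nonce
            (a sketch; ``self.nonce`` is a hypothetical bytes attribute)::

                def render_embed_js(self, js_embed):
                    js = super().render_embed_js(js_embed)
                    tag = b'<script nonce="' + self.nonce + b'"'  # hypothetical
                    return js.replace(b"<script", tag, 1)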
            """
            return (
                b'<script type="text/javascript">\n//<![CDATA[\n'
                + b"\n".join(js_embed)
                + b"\n//]]>\n</script>"
            )
    
        def render_linked_css(self, css_files: Iterable[str]) -> str:
            """Default method used to render the final css links for the
            rendered webpage.
    
            Override this method in a sub-classed controller to change the output.
            """
            paths = []
            unique_paths = set()  # type: Set[str]
    
            for path in css_files:
                if not is_absolute(path):
                    path = self.static_url(path)
                if path not in unique_paths:
                    paths.append(path)
                    unique_paths.add(path)
    
            return "".join(
                '<link href="' + escape.xhtml_escape(p) + '" '
                'type="text/css" rel="stylesheet"/>'
                for p in paths
            )
    
        def render_embed_css(self, css_embed: Iterable[bytes]) -> bytes:
            """Default method used to render the final embedded css for the
            rendered webpage.
    
            Override this method in a sub-classed controller to change the output.
            """
            return b'"
    
        def render_string(self, template_name: str, **kwargs: Any) -> bytes:
            """Generate the given template with the given arguments.
    
            We return the generated byte string (in utf8). To generate and
            write a template as a response, use render() above.
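
            For example, to build a fragment for later use (a sketch; the
            template name and keyword argument are hypothetical)::

                html = self.render_string("item.html", item=item)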
            """
            # If no template_path is specified, use the path of the calling file
            template_path = self.get_template_path()
            if not template_path:
                frame = sys._getframe(0)
                web_file = frame.f_code.co_filename
                while frame.f_code.co_filename == web_file and frame.f_back is not None:
                    frame = frame.f_back
                assert frame.f_code.co_filename is not None
                template_path = os.path.dirname(frame.f_code.co_filename)
            with RequestHandler._template_loader_lock:
                if template_path not in RequestHandler._template_loaders:
                    loader = self.create_template_loader(template_path)
                    RequestHandler._template_loaders[template_path] = loader
                else:
                    loader = RequestHandler._template_loaders[template_path]
            t = loader.load(template_name)
            namespace = self.get_template_namespace()
            namespace.update(kwargs)
            return t.generate(**namespace)
    
        def get_template_namespace(self) -> Dict[str, Any]:
            """Returns a dictionary to be used as the default template namespace.
    
            May be overridden by subclasses to add or modify values.
    
            The results of this method will be combined with additional
            defaults in the `tornado.template` module and keyword arguments
            to `render` or `render_string`.
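
            For example, a subclass can expose an extra value to all
            templates (a sketch; the ``app_version`` key is hypothetical)::

                def get_template_namespace(self):
                    namespace = super().get_template_namespace()
                    namespace["app_version"] = "1.0"  # hypothetical value
                    return namespace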
            """
            namespace = dict(
                handler=self,
                request=self.request,
                current_user=self.current_user,
                locale=self.locale,
                _=self.locale.translate,
                pgettext=self.locale.pgettext,
                static_url=self.static_url,
                xsrf_form_html=self.xsrf_form_html,
                reverse_url=self.reverse_url,
            )
            namespace.update(self.ui)
            return namespace
    
        def create_template_loader(self, template_path: str) -> template.BaseLoader:
            """Returns a new template loader for the given path.
    
            May be overridden by subclasses.  By default returns a
            directory-based loader on the given path, using the
            ``autoescape`` and ``template_whitespace`` application
            settings.  If a ``template_loader`` application setting is
            supplied, uses that instead.
            """
            settings = self.application.settings
            if "template_loader" in settings:
                return settings["template_loader"]
            kwargs = {}
            if "autoescape" in settings:
                # autoescape=None means "no escaping", so we have to be sure
                # to only pass this kwarg if the user asked for it.
                kwargs["autoescape"] = settings["autoescape"]
            if "template_whitespace" in settings:
                kwargs["whitespace"] = settings["template_whitespace"]
            return template.Loader(template_path, **kwargs)
    
        def flush(self, include_footers: bool = False) -> "Future[None]":
            """Flushes the current output buffer to the network.
    
            .. versionchanged:: 4.0
               Now returns a `.Future` if no callback is given.
    
            .. versionchanged:: 6.0
    
               The ``callback`` argument was removed.
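
            In a coroutine, the returned `.Future` can be awaited to apply
            flow control while streaming a large response (a sketch)::

                async def get(self):
                    for chunk in chunks:  # "chunks" is hypothetical
                        self.write(chunk)
                        await self.flush()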
            """
            assert self.request.connection is not None
            chunk = b"".join(self._write_buffer)
            self._write_buffer = []
            if not self._headers_written:
                self._headers_written = True
                for transform in self._transforms:
                    assert chunk is not None
                    (
                        self._status_code,
                        self._headers,
                        chunk,
                    ) = transform.transform_first_chunk(
                        self._status_code, self._headers, chunk, include_footers
                    )
                # Ignore the chunk and only write the headers for HEAD requests
                if self.request.method == "HEAD":
                    chunk = b""
    
                # Finalize the cookie headers (which have been stored in a side
                # object so an outgoing cookie could be overwritten before it
                # is sent).
                if hasattr(self, "_new_cookie"):
                    for cookie in self._new_cookie.values():
                        self.add_header("Set-Cookie", cookie.OutputString(None))
    
                start_line = httputil.ResponseStartLine("", self._status_code, self._reason)
                return self.request.connection.write_headers(
                    start_line, self._headers, chunk
                )
            else:
                for transform in self._transforms:
                    chunk = transform.transform_chunk(chunk, include_footers)
                # Ignore the chunk and only write the headers for HEAD requests
                if self.request.method != "HEAD":
                    return self.request.connection.write(chunk)
                else:
                    future = Future()  # type: Future[None]
                    future.set_result(None)
                    return future
    
        def finish(self, chunk: Optional[Union[str, bytes, dict]] = None) -> "Future[None]":
            """Finishes this response, ending the HTTP request.
    
            Passing a ``chunk`` to ``finish()`` is equivalent to passing that
            chunk to ``write()`` and then calling ``finish()`` with no arguments.
    
            Returns a `.Future` which may optionally be awaited to track the sending
            of the response to the client. This `.Future` resolves when all the response
            data has been sent, and raises an error if the connection is closed before all
            data can be sent.
    
            .. versionchanged:: 5.1
    
               Now returns a `.Future` instead of ``None``.
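
            For example, to observe when the response has actually been
            sent to the client (a sketch)::

                async def get(self):
                    self.write("hello")
                    await self.finish()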
            """
            if self._finished:
                raise RuntimeError("finish() called twice")
    
            if chunk is not None:
                self.write(chunk)
    
            # Automatically support ETags and add the Content-Length header if
            # we have not flushed any content yet.
            if not self._headers_written:
                if (
                    self._status_code == 200
                    and self.request.method in ("GET", "HEAD")
                    and "Etag" not in self._headers
                ):
                    self.set_etag_header()
                    if self.check_etag_header():
                        self._write_buffer = []
                        self.set_status(304)
                if self._status_code in (204, 304) or (100 <= self._status_code < 200):
                    assert not self._write_buffer, (
                        "Cannot send body with %s" % self._status_code
                    )
                    self._clear_representation_headers()
                elif "Content-Length" not in self._headers:
                    content_length = sum(len(part) for part in self._write_buffer)
                    self.set_header("Content-Length", content_length)
    
            assert self.request.connection is not None
            # Now that the request is finished, clear the callback we
            # set on the HTTPConnection (which would otherwise prevent the
            # garbage collection of the RequestHandler when there
            # are keepalive connections)
            self.request.connection.set_close_callback(None)  # type: ignore
    
            future = self.flush(include_footers=True)
            self.request.connection.finish()
            self._log()
            self._finished = True
            self.on_finish()
            self._break_cycles()
            return future
    
        def detach(self) -> iostream.IOStream:
            """Take control of the underlying stream.
    
            Returns the underlying `.IOStream` object and stops all
            further HTTP processing. Intended for implementing protocols
            like websockets that tunnel over an HTTP handshake.
    
            This method is only supported when HTTP/1.1 is used.
    
            .. versionadded:: 5.1
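
            A sketch of typical use (the protocol payload is hypothetical)::

                def get(self):
                    stream = self.detach()
                    # The application now owns the stream and must close it.
                    stream.write(b"proprietary protocol greeting")  # hypothetical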
            """
            self._finished = True
            # TODO: add detach to HTTPConnection?
            return self.request.connection.detach()  # type: ignore
    
        def _break_cycles(self) -> None:
            # Break up a reference cycle between this handler and the
            # _ui_module closures to allow for faster GC on CPython.
            self.ui = None  # type: ignore
    
        def send_error(self, status_code: int = 500, **kwargs: Any) -> None:
            """Sends the given HTTP error code to the browser.
    
            If `flush()` has already been called, it is not possible to send
            an error, so this method will simply terminate the response.
            If output has been written but not yet flushed, it will be discarded
            and replaced with the error page.
    
            Override `write_error()` to customize the error page that is returned.
            Additional keyword arguments are passed through to `write_error`.
            """
            if self._headers_written:
                gen_log.error("Cannot send error response after headers written")
                if not self._finished:
                    # If we get an error between writing headers and finishing,
                    # we are unlikely to be able to finish due to a
                    # Content-Length mismatch. Try anyway to release the
                    # socket.
                    try:
                        self.finish()
                    except Exception:
                        gen_log.error("Failed to flush partial response", exc_info=True)
                return
            self.clear()
    
            reason = kwargs.get("reason")
            if "exc_info" in kwargs:
                exception = kwargs["exc_info"][1]
                if isinstance(exception, HTTPError) and exception.reason:
                    reason = exception.reason
            self.set_status(status_code, reason=reason)
            try:
                self.write_error(status_code, **kwargs)
            except Exception:
                app_log.error("Uncaught exception in write_error", exc_info=True)
            if not self._finished:
                self.finish()
    
        def write_error(self, status_code: int, **kwargs: Any) -> None:
            """Override to implement custom error pages.
    
            ``write_error`` may call `write`, `render`, `set_header`, etc
            to produce output as usual.
    
            If this error was caused by an uncaught exception (including
            HTTPError), an ``exc_info`` triple will be available as
            ``kwargs["exc_info"]``.  Note that this exception may not be
            the "current" exception for purposes of methods like
            ``sys.exc_info()`` or ``traceback.format_exc``.
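
            For example, to render a custom error template (a sketch;
            ``error.html`` is a hypothetical template)::

                def write_error(self, status_code, **kwargs):
                    # "error.html" is hypothetical
                    self.render("error.html", code=status_code, reason=self._reason)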
            """
            if self.settings.get("serve_traceback") and "exc_info" in kwargs:
                # in debug mode, try to send a traceback
                self.set_header("Content-Type", "text/plain")
                for line in traceback.format_exception(*kwargs["exc_info"]):
                    self.write(line)
                self.finish()
            else:
                self.finish(
                    "%(code)d: %(message)s"
                    "%(code)d: %(message)s"
                    % {"code": status_code, "message": self._reason}
                )
    
        @property
        def locale(self) -> tornado.locale.Locale:
            """The locale for the current session.
    
            Determined by either `get_user_locale`, which you can override to
            set the locale based on, e.g., a user preference stored in a
            database, or `get_browser_locale`, which uses the ``Accept-Language``
            header.
    
            .. versionchanged:: 4.1
               Added a property setter.
            """
            if not hasattr(self, "_locale"):
                loc = self.get_user_locale()
                if loc is not None:
                    self._locale = loc
                else:
                    self._locale = self.get_browser_locale()
                    assert self._locale
            return self._locale
    
        @locale.setter
        def locale(self, value: tornado.locale.Locale) -> None:
            self._locale = value
    
        def get_user_locale(self) -> Optional[tornado.locale.Locale]:
            """Override to determine the locale from the authenticated user.
    
            If None is returned, we fall back to `get_browser_locale()`.
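
            For example (a sketch; assumes the user object carries a
            ``locale`` attribute)::

                def get_user_locale(self):
                    user = self.current_user
                    if user and getattr(user, "locale", None):  # hypothetical attribute
                        return locale.get(user.locale)
                    return None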
    
            This method should return a `tornado.locale.Locale` object,
            most likely obtained via a call like ``tornado.locale.get("en")``
            """
            return None
    
        def get_browser_locale(self, default: str = "en_US") -> tornado.locale.Locale:
            """Determines the user's locale from ``Accept-Language`` header.
    
            See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
            """
            if "Accept-Language" in self.request.headers:
                languages = self.request.headers["Accept-Language"].split(",")
                locales = []
                for language in languages:
                    parts = language.strip().split(";")
                    if len(parts) > 1 and parts[1].startswith("q="):
                        try:
                            score = float(parts[1][2:])
                        except (ValueError, TypeError):
                            score = 0.0
                    else:
                        score = 1.0
                    locales.append((parts[0], score))
                if locales:
                    locales.sort(key=lambda pair: pair[1], reverse=True)
                    codes = [loc[0] for loc in locales]
                    return locale.get(*codes)
            return locale.get(default)
    
        @property
        def current_user(self) -> Any:
            """The authenticated user for this request.
    
            This is set in one of two ways:
    
            * A subclass may override `get_current_user()`, which will be called
              automatically the first time ``self.current_user`` is accessed.
              `get_current_user()` will only be called once per request,
              and is cached for future access::
    
                  def get_current_user(self):
                      user_cookie = self.get_secure_cookie("user")
                      if user_cookie:
                          return json.loads(user_cookie)
                      return None
    
            * It may be set as a normal variable, typically from an overridden
              `prepare()`::
    
                  @gen.coroutine
                  def prepare(self):
                      user_id_cookie = self.get_secure_cookie("user_id")
                      if user_id_cookie:
                          self.current_user = yield load_user(user_id_cookie)
    
            Note that `prepare()` may be a coroutine while `get_current_user()`
            may not, so the latter form is necessary if loading the user requires
            asynchronous operations.
    
            The user object may be any type of the application's choosing.
            """
            if not hasattr(self, "_current_user"):
                self._current_user = self.get_current_user()
            return self._current_user
    
        @current_user.setter
        def current_user(self, value: Any) -> None:
            self._current_user = value
    
        def get_current_user(self) -> Any:
            """Override to determine the current user from, e.g., a cookie.
    
            This method may not be a coroutine.
            """
            return None
    
        def get_login_url(self) -> str:
            """Override to customize the login URL based on the request.
    
            By default, we use the ``login_url`` application setting.
            """
            self.require_setting("login_url", "@tornado.web.authenticated")
            return self.application.settings["login_url"]
    
        def get_template_path(self) -> Optional[str]:
            """Override to customize template path for each handler.
    
            By default, we use the ``template_path`` application setting.
            Return None to load templates relative to the calling file.
            """
            return self.application.settings.get("template_path")
    
        @property
        def xsrf_token(self) -> bytes:
            """The XSRF-prevention token for the current user/session.
    
            To prevent cross-site request forgery, we set an '_xsrf' cookie
            and include the same '_xsrf' value as an argument with all POST
            requests. If the two do not match, we reject the form submission
            as a potential forgery.
    
            See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    
            This property is of type `bytes`, but it contains only ASCII
            characters. If a character string is required, there is no
            need to base64-encode it; just decode the byte string as
            UTF-8.
    
            .. versionchanged:: 3.2.2
               The xsrf token will now have a random mask applied in every
               request, which makes it safe to include the token in pages
               that are compressed.  See http://breachattack.com for more
               information on the issue fixed by this change.  Old (version 1)
               cookies will be converted to version 2 when this method is called
               unless the ``xsrf_cookie_version`` `Application` setting is
               set to 1.
    
            .. versionchanged:: 4.3
               The ``xsrf_cookie_kwargs`` `Application` setting may be
               used to supply additional cookie options (which will be
               passed directly to `set_cookie`). For example,
               ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
               will set the ``secure`` and ``httponly`` flags on the
               ``_xsrf`` cookie.
            """
            if not hasattr(self, "_xsrf_token"):
                version, token, timestamp = self._get_raw_xsrf_token()
                output_version = self.settings.get("xsrf_cookie_version", 2)
                cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
                if output_version == 1:
                    self._xsrf_token = binascii.b2a_hex(token)
                elif output_version == 2:
                    mask = os.urandom(4)
                    self._xsrf_token = b"|".join(
                        [
                            b"2",
                            binascii.b2a_hex(mask),
                            binascii.b2a_hex(_websocket_mask(mask, token)),
                            utf8(str(int(timestamp))),
                        ]
                    )
                else:
                    raise ValueError("unknown xsrf cookie version %d", output_version)
                if version is None:
                    if self.current_user and "expires_days" not in cookie_kwargs:
                        cookie_kwargs["expires_days"] = 30
                    self.set_cookie("_xsrf", self._xsrf_token, **cookie_kwargs)
            return self._xsrf_token
    
        def _get_raw_xsrf_token(self) -> Tuple[Optional[int], bytes, float]:
            """Read or generate the xsrf token in its raw form.
    
            The raw_xsrf_token is a tuple containing:
    
            * version: the version of the cookie from which this token was read,
              or None if we generated a new token in this request.
            * token: the raw token data; random (non-ascii) bytes.
            * timestamp: the time this token was generated (will not be accurate
              for version 1 cookies)
            """
            if not hasattr(self, "_raw_xsrf_token"):
                cookie = self.get_cookie("_xsrf")
                if cookie:
                    version, token, timestamp = self._decode_xsrf_token(cookie)
                else:
                    version, token, timestamp = None, None, None
                if token is None:
                    version = None
                    token = os.urandom(16)
                    timestamp = time.time()
                assert token is not None
                assert timestamp is not None
                self._raw_xsrf_token = (version, token, timestamp)
            return self._raw_xsrf_token
    
        def _decode_xsrf_token(
            self, cookie: str
        ) -> Tuple[Optional[int], Optional[bytes], Optional[float]]:
            """Convert a cookie string into a the tuple form returned by
            _get_raw_xsrf_token.
            """
    
            try:
                m = _signed_value_version_re.match(utf8(cookie))
    
                if m:
                    version = int(m.group(1))
                    if version == 2:
                        _, mask_str, masked_token, timestamp_str = cookie.split("|")
    
                        mask = binascii.a2b_hex(utf8(mask_str))
                        token = _websocket_mask(mask, binascii.a2b_hex(utf8(masked_token)))
                        timestamp = int(timestamp_str)
                        return version, token, timestamp
                    else:
                        # Treat unknown versions as not present instead of failing.
                        raise Exception("Unknown xsrf cookie version")
                else:
                    version = 1
                    try:
                        token = binascii.a2b_hex(utf8(cookie))
                    except (binascii.Error, TypeError):
                        token = utf8(cookie)
                    # We don't have a usable timestamp in older versions.
                    timestamp = int(time.time())
                    return (version, token, timestamp)
            except Exception:
                # Catch exceptions and return nothing instead of failing.
                gen_log.debug("Uncaught exception in _decode_xsrf_token", exc_info=True)
                return None, None, None
    
        def check_xsrf_cookie(self) -> None:
            """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
    
            To prevent cross-site request forgery, we set an ``_xsrf``
            cookie and include the same value as a non-cookie
            field with all ``POST`` requests. If the two do not match, we
            reject the form submission as a potential forgery.
    
            The ``_xsrf`` value may be set as either a form field named ``_xsrf``
            or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
            (the latter is accepted for compatibility with Django).
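
            With `tornado.httpclient`, the token can be supplied as a header
            (a sketch; ``client`` is an `.AsyncHTTPClient` and ``token``
            holds the value of the ``_xsrf`` cookie)::

                await client.fetch(url, method="POST", body=body,
                                   headers={"X-XSRFToken": token})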
    
            See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    
            .. versionchanged:: 3.2.2
               Added support for cookie version 2.  Both versions 1 and 2 are
               supported.
            """
            # Prior to release 1.1.1, this check was ignored if the HTTP header
            # ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
            # has been shown to be insecure and has been removed.  For more
            # information please see
            # http://www.djangoproject.com/weblog/2011/feb/08/security/
            # http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
            token = (
                self.get_argument("_xsrf", None)
                or self.request.headers.get("X-Xsrftoken")
                or self.request.headers.get("X-Csrftoken")
            )
            if not token:
                raise HTTPError(403, "'_xsrf' argument missing from POST")
            _, token, _ = self._decode_xsrf_token(token)
            _, expected_token, _ = self._get_raw_xsrf_token()
            if not token:
                raise HTTPError(403, "'_xsrf' argument has invalid format")
            if not hmac.compare_digest(utf8(token), utf8(expected_token)):
                raise HTTPError(403, "XSRF cookie does not match POST argument")
    
        def xsrf_form_html(self) -> str:
            """An HTML ```` element to be included with all POST forms.
    
            It defines the ``_xsrf`` input value, which we check on all POST
            requests to prevent cross-site request forgery. If you have set
            the ``xsrf_cookies`` application setting, you must include this
            HTML within all of your HTML forms.
    
            In a template, this method should be called with ``{% module
            xsrf_form_html() %}``
    
            See `check_xsrf_cookie()` above for more information.
            """
            return (
                '<input type="hidden" name="_xsrf" value="'
                + escape.xhtml_escape(self.xsrf_token)
                + '"/>'
            )
    
        def static_url(
            self, path: str, include_host: Optional[bool] = None, **kwargs: Any
        ) -> str:
            """Returns a static URL for the given relative static file path.
    
            This method requires you set the ``static_path`` setting in your
            application (which specifies the root directory of your static
            files).
    
            This method returns a versioned url (by default appending
            ``?v=<signature>``), which allows the static files to be
            cached indefinitely.  This can be disabled by passing
            ``include_version=False`` (in the default implementation;
            other static file implementations are not required to support
            this, but they may support other options).
    
            By default this method returns URLs relative to the current
            host, but if ``include_host`` is true the URL returned will be
            absolute.  If this handler has an ``include_host`` attribute,
            that value will be used as the default for all `static_url`
            calls that do not pass ``include_host`` as a keyword argument.
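
            For example (a sketch; the file path is hypothetical)::

                url = self.static_url("css/style.css")
                # e.g. "/static/css/style.css?v=<signature>"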
    
            """
            self.require_setting("static_path", "static_url")
            get_url = self.settings.get(
                "static_handler_class", StaticFileHandler
            ).make_static_url
    
            if include_host is None:
                include_host = getattr(self, "include_host", False)
    
            if include_host:
                base = self.request.protocol + "://" + self.request.host
            else:
                base = ""
    
            return base + get_url(self.settings, path, **kwargs)
    
        def require_setting(self, name: str, feature: str = "this feature") -> None:
            """Raises an exception if the given app setting is not defined."""
            if not self.application.settings.get(name):
                raise Exception(
                    "You must define the '%s' setting in your "
                    "application to use %s" % (name, feature)
                )
    
        def reverse_url(self, name: str, *args: Any) -> str:
            """Alias for `Application.reverse_url`."""
            return self.application.reverse_url(name, *args)
    
        def compute_etag(self) -> Optional[str]:
            """Computes the etag header to be used for this request.
    
            By default uses a hash of the content written so far.
    
            May be overridden to provide custom etag implementations,
            or may return None to disable tornado's default etag support.
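
            For example, to disable etag generation for a handler (a sketch)::

                def compute_etag(self):
                    return None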
            """
            hasher = hashlib.sha1()
            for part in self._write_buffer:
                hasher.update(part)
            return '"%s"' % hasher.hexdigest()
    
        def set_etag_header(self) -> None:
            """Sets the response's Etag header using ``self.compute_etag()``.
    
            Note: no header will be set if ``compute_etag()`` returns ``None``.
    
            This method is called automatically when the request is finished.
            """
            etag = self.compute_etag()
            if etag is not None:
                self.set_header("Etag", etag)
    
        def check_etag_header(self) -> bool:
            """Checks the ``Etag`` header against requests's ``If-None-Match``.
    
            Returns ``True`` if the request's Etag matches and a 304 should be
            returned. For example::
    
                self.set_etag_header()
                if self.check_etag_header():
                    self.set_status(304)
                    return
    
            This method is called automatically when the request is finished,
            but may be called earlier for applications that override
            `compute_etag` and want to do an early check for ``If-None-Match``
            before completing the request.  The ``Etag`` header should be set
            (perhaps with `set_etag_header`) before calling this method.
            """
            computed_etag = utf8(self._headers.get("Etag", ""))
            # Find all weak and strong etag values from If-None-Match header
            # because RFC 7232 allows multiple etag values in a single header.
            etags = re.findall(
                br'\*|(?:W/)?"[^"]*"', utf8(self.request.headers.get("If-None-Match", ""))
            )
            if not computed_etag or not etags:
                return False
    
            match = False
            if etags[0] == b"*":
                match = True
            else:
                # Use a weak comparison when comparing entity-tags.
                def val(x: bytes) -> bytes:
                    return x[2:] if x.startswith(b"W/") else x
    
                for etag in etags:
                    if val(etag) == val(computed_etag):
                        match = True
                        break
            return match
    
        async def _execute(
            self, transforms: List["OutputTransform"], *args: bytes, **kwargs: bytes
        ) -> None:
            """Executes this request with the given output transforms."""
            self._transforms = transforms
            try:
                if self.request.method not in self.SUPPORTED_METHODS:
                    raise HTTPError(405)
                self.path_args = [self.decode_argument(arg) for arg in args]
                self.path_kwargs = dict(
                    (k, self.decode_argument(v, name=k)) for (k, v) in kwargs.items()
                )
                # If XSRF cookies are turned on, reject form submissions without
                # the proper cookie
                if self.request.method not in (
                    "GET",
                    "HEAD",
                    "OPTIONS",
                ) and self.application.settings.get("xsrf_cookies"):
                    self.check_xsrf_cookie()
    
                result = self.prepare()
                if result is not None:
                    result = await result
                if self._prepared_future is not None:
                    # Tell the Application we've finished with prepare()
                    # and are ready for the body to arrive.
                    future_set_result_unless_cancelled(self._prepared_future, None)
                if self._finished:
                    return
    
                if _has_stream_request_body(self.__class__):
                    # In streaming mode request.body is a Future that signals
                    # the body has been completely received.  The Future has no
                    # result; the data has been passed to self.data_received
                    # instead.
                    try:
                        await self.request._body_future
                    except iostream.StreamClosedError:
                        return
    
                method = getattr(self, self.request.method.lower())
                result = method(*self.path_args, **self.path_kwargs)
                if result is not None:
                    result = await result
                if self._auto_finish and not self._finished:
                    self.finish()
            except Exception as e:
                try:
                    self._handle_request_exception(e)
                except Exception:
                    app_log.error("Exception in exception handler", exc_info=True)
                finally:
                    # Unset result to avoid circular references
                    result = None
                if self._prepared_future is not None and not self._prepared_future.done():
                    # In case we failed before setting _prepared_future, do it
                    # now (to unblock the HTTP server).  Note that this is not
                    # in a finally block to avoid GC issues prior to Python 3.4.
                    self._prepared_future.set_result(None)
    
        def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
            """Implement this method to handle streamed request data.
    
            Requires the `.stream_request_body` decorator.
    
            May be a coroutine for flow control.
            """
            raise NotImplementedError()
    
        def _log(self) -> None:
            """Logs the current request.
    
            Sort of deprecated since this functionality was moved to the
            Application, but left in place for the benefit of existing apps
            that have overridden this method.
            """
            self.application.log_request(self)
    
        def _request_summary(self) -> str:
            return "%s %s (%s)" % (
                self.request.method,
                self.request.uri,
                self.request.remote_ip,
            )
    
        def _handle_request_exception(self, e: BaseException) -> None:
            if isinstance(e, Finish):
                # Not an error; just finish the request without logging.
                if not self._finished:
                    self.finish(*e.args)
                return
            try:
                self.log_exception(*sys.exc_info())
            except Exception:
                # An error here should still get a best-effort send_error()
                # to avoid leaking the connection.
                app_log.error("Error in exception logger", exc_info=True)
            if self._finished:
                # Extra errors after the request has been finished should
                # be logged, but there is no reason to continue to try and
                # send a response.
                return
            if isinstance(e, HTTPError):
                self.send_error(e.status_code, exc_info=sys.exc_info())
            else:
                self.send_error(500, exc_info=sys.exc_info())
    
        def log_exception(
            self,
            typ: "Optional[Type[BaseException]]",
            value: Optional[BaseException],
            tb: Optional[TracebackType],
        ) -> None:
            """Override to customize logging of uncaught exceptions.
    
            By default logs instances of `HTTPError` as warnings without
            stack traces (on the ``tornado.general`` logger), and all
            other exceptions as errors with stack traces (on the
            ``tornado.application`` logger).
    
            .. versionadded:: 3.1
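
            A sketch of an override that also reports server errors to an
            external service (``report_error`` is hypothetical)::

                def log_exception(self, typ, value, tb):
                    if not isinstance(value, HTTPError):
                        report_error(value)  # hypothetical reporting hook
                    super().log_exception(typ, value, tb)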
            """
            if isinstance(value, HTTPError):
                if value.log_message:
                    format = "%d %s: " + value.log_message
                    args = [value.status_code, self._request_summary()] + list(value.args)
                    gen_log.warning(format, *args)
            else:
                app_log.error(
                    "Uncaught exception %s\n%r",
                    self._request_summary(),
                    self.request,
                    exc_info=(typ, value, tb),  # type: ignore
                )
    
        def _ui_module(self, name: str, module: Type["UIModule"]) -> Callable[..., str]:
            def render(*args, **kwargs) -> str:  # type: ignore
                if not hasattr(self, "_active_modules"):
                    self._active_modules = {}  # type: Dict[str, UIModule]
                if name not in self._active_modules:
                    self._active_modules[name] = module(self)
                rendered = self._active_modules[name].render(*args, **kwargs)
                return rendered
    
            return render
    
        def _ui_method(self, method: Callable[..., str]) -> Callable[..., str]:
            return lambda *args, **kwargs: method(self, *args, **kwargs)
    
        def _clear_representation_headers(self) -> None:
            # 304 responses should not contain representation metadata
            # headers (defined in
            # https://tools.ietf.org/html/rfc7231#section-3.1)
            # not explicitly allowed by
            # https://tools.ietf.org/html/rfc7232#section-4.1
            headers = ["Content-Encoding", "Content-Language", "Content-Type"]
            for h in headers:
                self.clear_header(h)
    
    
    def stream_request_body(cls: Type[RequestHandler]) -> Type[RequestHandler]:
        """Apply to `RequestHandler` subclasses to enable streaming body support.
    
        This decorator implies the following changes:
    
        * `.HTTPServerRequest.body` is undefined, and body arguments will not
          be included in `RequestHandler.get_argument`.
        * `RequestHandler.prepare` is called when the request headers have been
          read instead of after the entire body has been read.
        * The subclass must define a method ``data_received(self, data):``, which
          will be called zero or more times as data is available.  Note that
          if the request has an empty body, ``data_received`` may not be called.
        * ``prepare`` and ``data_received`` may return Futures (such as via
          ``@gen.coroutine``), in which case the next method will not be called
          until those futures have completed.
        * The regular HTTP method (``post``, ``put``, etc) will be called after
          the entire body has been read.
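
        A minimal sketch of such a handler (the names are hypothetical)::

            @stream_request_body
            class UploadHandler(RequestHandler):
                def prepare(self):
                    self.bytes_read = 0

                def data_received(self, data):
                    self.bytes_read += len(data)

                def post(self):
                    self.write("received %d bytes" % self.bytes_read)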
    
        See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
        for example usage.
        """  # noqa: E501
        if not issubclass(cls, RequestHandler):
            raise TypeError("expected subclass of RequestHandler, got %r", cls)
        cls._stream_request_body = True
        return cls
    
    
    def _has_stream_request_body(cls: Type[RequestHandler]) -> bool:
        if not issubclass(cls, RequestHandler):
            raise TypeError("expected subclass of RequestHandler, got %r", cls)
        return cls._stream_request_body
    
    
    def removeslash(
        method: Callable[..., Optional[Awaitable[None]]]
    ) -> Callable[..., Optional[Awaitable[None]]]:
        """Use this decorator to remove trailing slashes from the request path.
    
        For example, a request to ``/foo/`` would redirect to ``/foo`` with this
        decorator. Your request handler mapping should use a regular expression
        like ``r'/foo/*'`` in conjunction with using the decorator.
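
        A sketch of typical use (the handler and pattern are hypothetical)::

            class ArticleHandler(RequestHandler):
                @removeslash
                def get(self):
                    self.write("article index")

            app = Application([(r"/article/*", ArticleHandler)])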
        """
    
        @functools.wraps(method)
        def wrapper(  # type: ignore
            self: RequestHandler, *args, **kwargs
        ) -> Optional[Awaitable[None]]:
            if self.request.path.endswith("/"):
                if self.request.method in ("GET", "HEAD"):
                    uri = self.request.path.rstrip("/")
                    if uri:  # don't try to redirect '/' to ''
                        if self.request.query:
                            uri += "?" + self.request.query
                        self.redirect(uri, permanent=True)
                        return None
                else:
                    raise HTTPError(404)
            return method(self, *args, **kwargs)
    
        return wrapper
    
    
    def addslash(
        method: Callable[..., Optional[Awaitable[None]]]
    ) -> Callable[..., Optional[Awaitable[None]]]:
        """Use this decorator to add a missing trailing slash to the request path.
    
        For example, a request to ``/foo`` would redirect to ``/foo/`` with this
        decorator. Your request handler mapping should use a regular expression
        like ``r'/foo/?'`` in conjunction with using the decorator.
        """
    
        @functools.wraps(method)
        def wrapper(  # type: ignore
            self: RequestHandler, *args, **kwargs
        ) -> Optional[Awaitable[None]]:
            if not self.request.path.endswith("/"):
                if self.request.method in ("GET", "HEAD"):
                    uri = self.request.path + "/"
                    if self.request.query:
                        uri += "?" + self.request.query
                    self.redirect(uri, permanent=True)
                    return None
                raise HTTPError(404)
            return method(self, *args, **kwargs)
    
        return wrapper
    
    
    class _ApplicationRouter(ReversibleRuleRouter):
        """Routing implementation used internally by `Application`.
    
        Provides a binding between `Application` and `RequestHandler`.
        This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
            * it allows the use of `RequestHandler` subclasses as a `~.routing.Rule` target and
            * it allows the use of a list/tuple of rules as a `~.routing.Rule` target.
            The ``process_rule`` implementation will substitute this list with an appropriate
            `_ApplicationRouter` instance.
        """
    
        def __init__(
            self, application: "Application", rules: Optional[_RuleList] = None
        ) -> None:
            assert isinstance(application, Application)
            self.application = application
            super().__init__(rules)
    
        def process_rule(self, rule: Rule) -> Rule:
            rule = super().process_rule(rule)
    
            if isinstance(rule.target, (list, tuple)):
                rule.target = _ApplicationRouter(
                    self.application, rule.target  # type: ignore
                )
    
            return rule
    
        def get_target_delegate(
            self, target: Any, request: httputil.HTTPServerRequest, **target_params: Any
        ) -> Optional[httputil.HTTPMessageDelegate]:
            if isclass(target) and issubclass(target, RequestHandler):
                return self.application.get_handler_delegate(
                    request, target, **target_params
                )
    
            return super().get_target_delegate(target, request, **target_params)
    
    
    class Application(ReversibleRouter):
        r"""A collection of request handlers that make up a web application.
    
        Instances of this class are callable and can be passed directly to
        HTTPServer to serve the application::
    
            application = web.Application([
                (r"/", MainPageHandler),
            ])
            http_server = httpserver.HTTPServer(application)
            http_server.listen(8080)
            ioloop.IOLoop.current().start()
    
        The constructor for this class takes in a list of `~.routing.Rule`
        objects or tuples of values corresponding to the arguments of
        `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
        the values in square brackets being optional. The default matcher is
        `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
        instead of ``(PathMatches(regexp), target)``.
    
        A common routing target is a `RequestHandler` subclass, but you can also
        use lists of rules as a target, which create a nested routing configuration::
    
            application = web.Application([
                (HostMatches("example.com"), [
                    (r"/", MainPageHandler),
                    (r"/feed", FeedHandler),
                ]),
            ])
    
        In addition to this you can use nested `~.routing.Router` instances,
        `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
        (see `~.routing` module docs for more information).
    
        When we receive requests, we iterate over the list in order and
        instantiate an instance of the first request class whose regexp
        matches the request path. The request class can be specified as
        either a class object or a (fully-qualified) name.
    
        A dictionary may be passed as the third element (``target_kwargs``)
        of the tuple, which will be used as keyword arguments to the handler's
        constructor and `~RequestHandler.initialize` method. This pattern
        is used for the `StaticFileHandler` in this example (note that a
        `StaticFileHandler` can be installed automatically with the
        static_path setting described below)::
    
            application = web.Application([
                (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
            ])
    
        We support virtual hosts with the `add_handlers` method, which takes in
        a host regular expression as the first argument::
    
            application.add_handlers(r"www\.myhost\.com", [
                (r"/article/([0-9]+)", ArticleHandler),
            ])
    
        If there's no match for the current request's host, then ``default_host``
        parameter value is matched against host regular expressions.
    
    
        .. warning::
    
           Applications that do not use TLS may be vulnerable to :ref:`DNS
           rebinding <dns-rebinding>` attacks. This attack is especially
           relevant to applications that only listen on ``127.0.0.1`` or
           other private networks. Appropriate host patterns must be used
           (instead of the default of ``r'.*'``) to prevent this risk. The
           ``default_host`` argument must not be used in applications that
           may be vulnerable to DNS rebinding.
    
        You can serve static files by sending the ``static_path`` setting
        as a keyword argument. We will serve those files from the
        ``/static/`` URI (this is configurable with the
        ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
        and ``/robots.txt`` from the same directory.  A custom subclass of
        `StaticFileHandler` can be specified with the
        ``static_handler_class`` setting.
    
        .. versionchanged:: 4.5
           Integration with the new `tornado.routing` module.
    
        """
    
        def __init__(
            self,
            handlers: Optional[_RuleList] = None,
            default_host: Optional[str] = None,
            transforms: Optional[List[Type["OutputTransform"]]] = None,
            **settings: Any
        ) -> None:
            if transforms is None:
                self.transforms = []  # type: List[Type[OutputTransform]]
                if settings.get("compress_response") or settings.get("gzip"):
                    self.transforms.append(GZipContentEncoding)
            else:
                self.transforms = transforms
            self.default_host = default_host
            self.settings = settings
            self.ui_modules = {
                "linkify": _linkify,
                "xsrf_form_html": _xsrf_form_html,
                "Template": TemplateModule,
            }
            self.ui_methods = {}  # type: Dict[str, Callable[..., str]]
            self._load_ui_modules(settings.get("ui_modules", {}))
            self._load_ui_methods(settings.get("ui_methods", {}))
            if self.settings.get("static_path"):
                path = self.settings["static_path"]
                handlers = list(handlers or [])
                static_url_prefix = settings.get("static_url_prefix", "/static/")
                static_handler_class = settings.get(
                    "static_handler_class", StaticFileHandler
                )
                static_handler_args = settings.get("static_handler_args", {})
                static_handler_args["path"] = path
                for pattern in [
                    re.escape(static_url_prefix) + r"(.*)",
                    r"/(favicon\.ico)",
                    r"/(robots\.txt)",
                ]:
                    handlers.insert(0, (pattern, static_handler_class, static_handler_args))
    
            if self.settings.get("debug"):
                self.settings.setdefault("autoreload", True)
                self.settings.setdefault("compiled_template_cache", False)
                self.settings.setdefault("static_hash_cache", False)
                self.settings.setdefault("serve_traceback", True)
    
            self.wildcard_router = _ApplicationRouter(self, handlers)
            self.default_router = _ApplicationRouter(
                self, [Rule(AnyMatches(), self.wildcard_router)]
            )
    
            # Automatically reload modified modules
            if self.settings.get("autoreload"):
                from tornado import autoreload
    
                autoreload.start()
    
        def listen(self, port: int, address: str = "", **kwargs: Any) -> HTTPServer:
            """Starts an HTTP server for this application on the given port.
    
            This is a convenience alias for creating an `.HTTPServer`
            object and calling its listen method.  Keyword arguments not
            supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
            `.HTTPServer` constructor.  For advanced uses
            (e.g. multi-process mode), do not use this method; create an
            `.HTTPServer` and call its
            `.TCPServer.bind`/`.TCPServer.start` methods directly.
    
            Note that after calling this method you still need to call
            ``IOLoop.current().start()`` to start the server.
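
            A typical startup sequence is then (``MainHandler`` is an
            illustrative handler class)::

                app = Application([(r"/", MainHandler)])
                app.listen(8888)
                IOLoop.current().start()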
    
            Returns the `.HTTPServer` object.
    
            .. versionchanged:: 4.3
               Now returns the `.HTTPServer` object.
            """
            server = HTTPServer(self, **kwargs)
            server.listen(port, address)
            return server
    
        def add_handlers(self, host_pattern: str, host_handlers: _RuleList) -> None:
            """Appends the given handlers to our handler list.
    
            Host patterns are processed sequentially in the order they were
            added. All matching patterns will be considered.
            """
            host_matcher = HostMatches(host_pattern)
            rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
    
            self.default_router.rules.insert(-1, rule)
    
            if self.default_host is not None:
                self.wildcard_router.add_rules(
                    [(DefaultHostMatches(self, host_matcher.host_pattern), host_handlers)]
                )
    
        def add_transform(self, transform_class: Type["OutputTransform"]) -> None:
            self.transforms.append(transform_class)
    
        def _load_ui_methods(self, methods: Any) -> None:
            if isinstance(methods, types.ModuleType):
                self._load_ui_methods(dict((n, getattr(methods, n)) for n in dir(methods)))
            elif isinstance(methods, list):
                for m in methods:
                    self._load_ui_methods(m)
            else:
                for name, fn in methods.items():
                    if (
                        not name.startswith("_")
                        and hasattr(fn, "__call__")
                        and name[0].lower() == name[0]
                    ):
                        self.ui_methods[name] = fn
    
        def _load_ui_modules(self, modules: Any) -> None:
            if isinstance(modules, types.ModuleType):
                self._load_ui_modules(dict((n, getattr(modules, n)) for n in dir(modules)))
            elif isinstance(modules, list):
                for m in modules:
                    self._load_ui_modules(m)
            else:
                assert isinstance(modules, dict)
                for name, cls in modules.items():
                    try:
                        if issubclass(cls, UIModule):
                            self.ui_modules[name] = cls
                    except TypeError:
                        pass
    
        def __call__(
            self, request: httputil.HTTPServerRequest
        ) -> Optional[Awaitable[None]]:
            # Legacy HTTPServer interface
            dispatcher = self.find_handler(request)
            return dispatcher.execute()
    
        def find_handler(
            self, request: httputil.HTTPServerRequest, **kwargs: Any
        ) -> "_HandlerDelegate":
            route = self.default_router.find_handler(request)
            if route is not None:
                return cast("_HandlerDelegate", route)
    
            if self.settings.get("default_handler_class"):
                return self.get_handler_delegate(
                    request,
                    self.settings["default_handler_class"],
                    self.settings.get("default_handler_args", {}),
                )
    
            return self.get_handler_delegate(request, ErrorHandler, {"status_code": 404})
    
        def get_handler_delegate(
            self,
            request: httputil.HTTPServerRequest,
            target_class: Type[RequestHandler],
            target_kwargs: Optional[Dict[str, Any]] = None,
            path_args: Optional[List[bytes]] = None,
            path_kwargs: Optional[Dict[str, bytes]] = None,
        ) -> "_HandlerDelegate":
            """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
            for application and `RequestHandler` subclass.
    
            :arg httputil.HTTPServerRequest request: current HTTP request.
            :arg RequestHandler target_class: a `RequestHandler` class.
            :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
            :arg list path_args: positional arguments for ``target_class`` HTTP method that
                will be executed while handling a request (``get``, ``post`` or any other).
            :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
            """
            return _HandlerDelegate(
                self, request, target_class, target_kwargs, path_args, path_kwargs
            )
    
        def reverse_url(self, name: str, *args: Any) -> str:
            """Returns a URL path for handler named ``name``
    
            The handler must be added to the application as a named `URLSpec`.
    
            Args will be substituted for capturing groups in the `URLSpec` regex.
            They will be converted to strings if necessary, encoded as utf8,
            and url-escaped.
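
            For example, with a rule registered as
            ``URLSpec(r"/entry/([0-9]+)", EntryHandler, name="entry")``
            (``EntryHandler`` is illustrative)::

                application.reverse_url("entry", 42)  # -> "/entry/42"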
            """
            reversed_url = self.default_router.reverse_url(name, *args)
            if reversed_url is not None:
                return reversed_url
    
            raise KeyError("%s not found in named urls" % name)
    
        def log_request(self, handler: RequestHandler) -> None:
            """Writes a completed HTTP request to the logs.
    
            By default writes to the python root logger.  To change
            this behavior either subclass Application and override this method,
            or pass a function in the application settings dictionary as
            ``log_function``.
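
            A minimal ``log_function`` sketch (``handlers`` is illustrative)::

                from tornado.log import access_log

                def log_function(handler):
                    access_log.info("%d %s", handler.get_status(),
                                    handler.request.uri)

                app = Application(handlers, log_function=log_function)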
            """
            if "log_function" in self.settings:
                self.settings["log_function"](handler)
                return
            if handler.get_status() < 400:
                log_method = access_log.info
            elif handler.get_status() < 500:
                log_method = access_log.warning
            else:
                log_method = access_log.error
            request_time = 1000.0 * handler.request.request_time()
            log_method(
                "%d %s %.2fms",
                handler.get_status(),
                handler._request_summary(),
                request_time,
            )
    
    
    class _HandlerDelegate(httputil.HTTPMessageDelegate):
        def __init__(
            self,
            application: Application,
            request: httputil.HTTPServerRequest,
            handler_class: Type[RequestHandler],
            handler_kwargs: Optional[Dict[str, Any]],
            path_args: Optional[List[bytes]],
            path_kwargs: Optional[Dict[str, bytes]],
        ) -> None:
            self.application = application
            self.connection = request.connection
            self.request = request
            self.handler_class = handler_class
            self.handler_kwargs = handler_kwargs or {}
            self.path_args = path_args or []
            self.path_kwargs = path_kwargs or {}
            self.chunks = []  # type: List[bytes]
            self.stream_request_body = _has_stream_request_body(self.handler_class)
    
        def headers_received(
            self,
            start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
            headers: httputil.HTTPHeaders,
        ) -> Optional[Awaitable[None]]:
            if self.stream_request_body:
                self.request._body_future = Future()
                return self.execute()
            return None
    
        def data_received(self, data: bytes) -> Optional[Awaitable[None]]:
            if self.stream_request_body:
                return self.handler.data_received(data)
            else:
                self.chunks.append(data)
                return None
    
        def finish(self) -> None:
            if self.stream_request_body:
                future_set_result_unless_cancelled(self.request._body_future, None)
            else:
                self.request.body = b"".join(self.chunks)
                self.request._parse_body()
                self.execute()
    
        def on_connection_close(self) -> None:
            if self.stream_request_body:
                self.handler.on_connection_close()
            else:
                self.chunks = None  # type: ignore
    
        def execute(self) -> Optional[Awaitable[None]]:
            # If template cache is disabled (usually in the debug mode),
            # re-compile templates and reload static files on every
            # request so you don't need to restart to see changes
            if not self.application.settings.get("compiled_template_cache", True):
                with RequestHandler._template_loader_lock:
                    for loader in RequestHandler._template_loaders.values():
                        loader.reset()
            if not self.application.settings.get("static_hash_cache", True):
                StaticFileHandler.reset()
    
            self.handler = self.handler_class(
                self.application, self.request, **self.handler_kwargs
            )
            transforms = [t(self.request) for t in self.application.transforms]
    
            if self.stream_request_body:
                self.handler._prepared_future = Future()
            # Note that if an exception escapes handler._execute it will be
            # trapped in the Future it returns (which we are ignoring here,
            # leaving it to be logged when the Future is GC'd).
            # However, that shouldn't happen because _execute has a blanket
            # except handler, and we cannot easily access the IOLoop here to
            # call add_future (because of the requirement to remain compatible
            # with WSGI)
            fut = gen.convert_yielded(
                self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
            )
            fut.add_done_callback(lambda f: f.result())
            # If we are streaming the request body, then execute() is finished
            # when the handler has prepared to receive the body.  If not,
            # it doesn't matter when execute() finishes (so we return None)
            return self.handler._prepared_future
    
    
    class HTTPError(Exception):
        """An exception that will turn into an HTTP error response.
    
        Raising an `HTTPError` is a convenient alternative to calling
        `RequestHandler.send_error` since it automatically ends the
        current function.
    
        To customize the response sent with an `HTTPError`, override
        `RequestHandler.write_error`.
    
        :arg int status_code: HTTP status code.  Must be listed in
            `httplib.responses <http.client.responses>` unless the ``reason``
            keyword argument is given.
        :arg str log_message: Message to be written to the log for this error
            (will not be shown to the user unless the `Application` is in debug
            mode).  May contain ``%s``-style placeholders, which will be filled
            in with remaining positional parameters.
        :arg str reason: Keyword-only argument.  The HTTP "reason" phrase
            to pass in the status line along with ``status_code``.  Normally
            determined automatically from ``status_code``, but can be used
            to use a non-standard numeric code.
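
        For example, in a handler method (``item_id`` is an illustrative
        variable)::

            raise HTTPError(404, "no such item: %s", item_id)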
        """
    
        def __init__(
            self,
            status_code: int = 500,
            log_message: Optional[str] = None,
            *args: Any,
            **kwargs: Any
        ) -> None:
            self.status_code = status_code
            self.log_message = log_message
            self.args = args
            self.reason = kwargs.get("reason", None)
            if log_message and not args:
                self.log_message = log_message.replace("%", "%%")
    
        def __str__(self) -> str:
            message = "HTTP %d: %s" % (
                self.status_code,
                self.reason or httputil.responses.get(self.status_code, "Unknown"),
            )
            if self.log_message:
                return message + " (" + (self.log_message % self.args) + ")"
            else:
                return message
    
    
    class Finish(Exception):
        """An exception that ends the request without producing an error response.
    
        When `Finish` is raised in a `RequestHandler`, the request will
        end (calling `RequestHandler.finish` if it hasn't already been
        called), but the error-handling methods (including
        `RequestHandler.write_error`) will not be called.
    
        If `Finish()` was created with no arguments, the pending response
        will be sent as-is. If `Finish()` was given an argument, that
        argument will be passed to `RequestHandler.finish()`.
    
        This can be a more convenient way to implement custom error pages
        than overriding ``write_error`` (especially in library code)::
    
            if self.current_user is None:
                self.set_status(401)
                self.set_header('WWW-Authenticate', 'Basic realm="something"')
                raise Finish()
    
        .. versionchanged:: 4.3
           Arguments passed to ``Finish()`` will be passed on to
           `RequestHandler.finish`.
        """
    
        pass
    
    
    class MissingArgumentError(HTTPError):
        """Exception raised by `RequestHandler.get_argument`.
    
        This is a subclass of `HTTPError`, so if it is uncaught a 400 response
        code will be used instead of 500 (and a stack trace will not be logged).
    
        .. versionadded:: 3.1
        """
    
        def __init__(self, arg_name: str) -> None:
            super().__init__(400, "Missing argument %s" % arg_name)
            self.arg_name = arg_name
    
    
    class ErrorHandler(RequestHandler):
        """Generates an error response with ``status_code`` for all requests."""
    
        def initialize(self, status_code: int) -> None:
            self.set_status(status_code)
    
        def prepare(self) -> None:
            raise HTTPError(self._status_code)
    
        def check_xsrf_cookie(self) -> None:
            # POSTs to an ErrorHandler don't actually have side effects,
            # so we don't need to check the xsrf token.  This allows POSTs
            # to the wrong url to return a 404 instead of 403.
            pass
    
    
    class RedirectHandler(RequestHandler):
        """Redirects the client to the given URL for all GET requests.
    
        You should provide the keyword argument ``url`` to the handler, e.g.::
    
            application = web.Application([
                (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
            ])
    
        `RedirectHandler` supports regular expression substitutions. E.g., to
        swap the first and second parts of a path while preserving the remainder::
    
            application = web.Application([
                (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
            ])
    
        The final URL is formatted with `str.format` and the substrings that match
        the capturing groups. In the above example, a request to "/a/b/c" would be
        formatted like::
    
            str.format("/{1}/{0}/{2}", "a", "b", "c")  # -> "/b/a/c"
    
        Use Python's :ref:`format string syntax <formatstrings>` to customize how
        values are substituted.
    
        .. versionchanged:: 4.5
           Added support for substitutions into the destination URL.
    
        .. versionchanged:: 5.0
           If any query arguments are present, they will be copied to the
           destination URL.
        """
    
        def initialize(self, url: str, permanent: bool = True) -> None:
            self._url = url
            self._permanent = permanent
    
        def get(self, *args: Any, **kwargs: Any) -> None:
            to_url = self._url.format(*args, **kwargs)
            if self.request.query_arguments:
                # TODO: figure out typing for the next line.
                to_url = httputil.url_concat(
                    to_url,
                    list(httputil.qs_to_qsl(self.request.query_arguments)),  # type: ignore
                )
            self.redirect(to_url, permanent=self._permanent)
    
    
    class StaticFileHandler(RequestHandler):
        """A simple handler that can serve static content from a directory.
    
        A `StaticFileHandler` is configured automatically if you pass the
        ``static_path`` keyword argument to `Application`.  This handler
        can be customized with the ``static_url_prefix``, ``static_handler_class``,
        and ``static_handler_args`` settings.
    
        To map an additional path to this handler for a static data directory
        you would add a line to your application like::
    
            application = web.Application([
                (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
            ])
    
        The handler constructor requires a ``path`` argument, which specifies the
        local root directory of the content to be served.
    
        Note that a capture group in the regex is required to parse the value for
        the ``path`` argument to the get() method (different than the constructor
        argument above); see `URLSpec` for details.
    
        To serve a file like ``index.html`` automatically when a directory is
        requested, set ``static_handler_args=dict(default_filename="index.html")``
        in your application settings, or add ``default_filename`` as an initializer
        argument for your ``StaticFileHandler``.
    
        To maximize the effectiveness of browser caching, this class supports
        versioned urls (by default using the argument ``?v=``).  If a version
        is given, we instruct the browser to cache this file indefinitely.
        `make_static_url` (also available as `RequestHandler.static_url`) can
        be used to construct a versioned url.
    
        This handler is intended primarily for use in development and light-duty
        file serving; for heavy traffic it will be more efficient to use
        a dedicated static file server (such as nginx or Apache).  We support
        the HTTP ``Accept-Ranges`` mechanism to return partial content (because
        some browsers require this functionality to be present to seek in
        HTML5 audio or video).
    
        **Subclassing notes**
    
        This class is designed to be extensible by subclassing, but because
        of the way static urls are generated with class methods rather than
        instance methods, the inheritance patterns are somewhat unusual.
        Be sure to use the ``@classmethod`` decorator when overriding a
        class method.  Instance methods may use the attributes ``self.path``
        ``self.absolute_path``, and ``self.modified``.
    
        Subclasses should only override methods discussed in this section;
        overriding other methods is error-prone.  Overriding
        ``StaticFileHandler.get`` is particularly problematic due to the
        tight coupling with ``compute_etag`` and other methods.
    
        To change the way static urls are generated (e.g. to match the behavior
        of another server or CDN), override `make_static_url`, `parse_url_path`,
        `get_cache_time`, and/or `get_version`.
    
        To replace all interaction with the filesystem (e.g. to serve
        static content from a database), override `get_content`,
        `get_content_size`, `get_modified_time`, `get_absolute_path`, and
        `validate_absolute_path`.
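
        A minimal subclass sketch (the caching policy shown is illustrative)::

            class DevStaticFileHandler(StaticFileHandler):
                def set_extra_headers(self, path: str) -> None:
                    # Disable browser caching during development.
                    self.set_header("Cache-Control", "no-store")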
    
        .. versionchanged:: 3.1
           Many of the methods for subclasses were added in Tornado 3.1.
        """
    
        CACHE_MAX_AGE = 86400 * 365 * 10  # 10 years
    
        _static_hashes = {}  # type: Dict[str, Optional[str]]
        _lock = threading.Lock()  # protects _static_hashes
    
        def initialize(self, path: str, default_filename: Optional[str] = None) -> None:
            self.root = path
            self.default_filename = default_filename
    
        @classmethod
        def reset(cls) -> None:
            with cls._lock:
                cls._static_hashes = {}
    
        def head(self, path: str) -> Awaitable[None]:
            return self.get(path, include_body=False)
    
        async def get(self, path: str, include_body: bool = True) -> None:
            # Set up our path instance variables.
            self.path = self.parse_url_path(path)
            del path  # make sure we don't refer to path instead of self.path again
            absolute_path = self.get_absolute_path(self.root, self.path)
            self.absolute_path = self.validate_absolute_path(self.root, absolute_path)
            if self.absolute_path is None:
                return
    
            self.modified = self.get_modified_time()
            self.set_headers()
    
            if self.should_return_304():
                self.set_status(304)
                return
    
            request_range = None
            range_header = self.request.headers.get("Range")
            if range_header:
                # As per RFC 2616 14.16, if an invalid Range header is specified,
                # the request will be treated as if the header didn't exist.
                request_range = httputil._parse_request_range(range_header)
    
            size = self.get_content_size()
            if request_range:
                start, end = request_range
                if start is not None and start < 0:
                    start += size
                    if start < 0:
                        start = 0
                if (
                    start is not None
                    and (start >= size or (end is not None and start >= end))
                ) or end == 0:
                    # As per RFC 2616 14.35.1, a range is not satisfiable if
                    # the first requested byte is equal to or greater than the
                    # content length, or if a suffix with length 0 is specified.
                    # https://tools.ietf.org/html/rfc7233#section-2.1
                    # A byte-range-spec is invalid if the last-byte-pos value is present
                    # and less than the first-byte-pos.
                    self.set_status(416)  # Range Not Satisfiable
                    self.set_header("Content-Type", "text/plain")
                    self.set_header("Content-Range", "bytes */%s" % (size,))
                    return
                if end is not None and end > size:
                    # Clients sometimes blindly use a large range to limit their
                    # download size; cap the endpoint at the actual file size.
                    end = size
                # Note: only return HTTP 206 if less than the entire range has been
                # requested. Not only is this semantically correct, but Chrome
                # refuses to play audio if it gets an HTTP 206 in response to
                # ``Range: bytes=0-``.
                if size != (end or size) - (start or 0):
                    self.set_status(206)  # Partial Content
                    self.set_header(
                        "Content-Range", httputil._get_content_range(start, end, size)
                    )
            else:
                start = end = None
    
            if start is not None and end is not None:
                content_length = end - start
            elif end is not None:
                content_length = end
            elif start is not None:
                content_length = size - start
            else:
                content_length = size
            self.set_header("Content-Length", content_length)
    
            if include_body:
                content = self.get_content(self.absolute_path, start, end)
                if isinstance(content, bytes):
                    content = [content]
                for chunk in content:
                    try:
                        self.write(chunk)
                        await self.flush()
                    except iostream.StreamClosedError:
                        return
            else:
                assert self.request.method == "HEAD"
    
        def compute_etag(self) -> Optional[str]:
            """Sets the ``Etag`` header based on static url version.
    
            This allows efficient ``If-None-Match`` checks against cached
            versions, and sends the correct ``Etag`` for a partial response
            (i.e. the same ``Etag`` as the full file).
    
            .. versionadded:: 3.1
            """
            assert self.absolute_path is not None
            version_hash = self._get_cached_version(self.absolute_path)
            if not version_hash:
                return None
            return '"%s"' % (version_hash,)
    
        def set_headers(self) -> None:
            """Sets the content and caching headers on the response.
    
            .. versionadded:: 3.1
            """
            self.set_header("Accept-Ranges", "bytes")
            self.set_etag_header()
    
            if self.modified is not None:
                self.set_header("Last-Modified", self.modified)
    
            content_type = self.get_content_type()
            if content_type:
                self.set_header("Content-Type", content_type)
    
            cache_time = self.get_cache_time(self.path, self.modified, content_type)
            if cache_time > 0:
                self.set_header(
                    "Expires",
                    datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time),
                )
                self.set_header("Cache-Control", "max-age=" + str(cache_time))
    
            self.set_extra_headers(self.path)
    
        def should_return_304(self) -> bool:
            """Returns True if the headers indicate that we should return 304.
    
            .. versionadded:: 3.1
            """
            # If client sent If-None-Match, use it, ignore If-Modified-Since
            if self.request.headers.get("If-None-Match"):
                return self.check_etag_header()
    
            # Check the If-Modified-Since, and don't send the result if the
            # content has not been modified
            ims_value = self.request.headers.get("If-Modified-Since")
            if ims_value is not None:
                date_tuple = email.utils.parsedate(ims_value)
                if date_tuple is not None:
                    if_since = datetime.datetime(*date_tuple[:6])
                    assert self.modified is not None
                    if if_since >= self.modified:
                        return True
    
            return False
    
        @classmethod
        def get_absolute_path(cls, root: str, path: str) -> str:
            """Returns the absolute location of ``path`` relative to ``root``.
    
            ``root`` is the path configured for this `StaticFileHandler`
            (in most cases the ``static_path`` `Application` setting).
    
            This class method may be overridden in subclasses.  By default
            it returns a filesystem path, but other strings may be used
            as long as they are unique and understood by the subclass's
            overridden `get_content`.
    
            .. versionadded:: 3.1
            """
            abspath = os.path.abspath(os.path.join(root, path))
            return abspath
    
        def validate_absolute_path(self, root: str, absolute_path: str) -> Optional[str]:
            """Validate and return the absolute path.
    
            ``root`` is the configured path for the `StaticFileHandler`,
            and ``path`` is the result of `get_absolute_path`
    
            This is an instance method called during request processing,
            so it may raise `HTTPError` or use methods like
            `RequestHandler.redirect` (return None after redirecting to
            halt further processing).  This is where 404 errors for missing files
            are generated.
    
            This method may modify the path before returning it, but note that
            any such modifications will not be understood by `make_static_url`.
    
            In instance methods, this method's result is available as
            ``self.absolute_path``.
    
            .. versionadded:: 3.1
            """
            # os.path.abspath strips a trailing /.
            # We must add it back to `root` so that we only match files
            # in a directory named `root` instead of files starting with
            # that prefix.
            root = os.path.abspath(root)
            if not root.endswith(os.path.sep):
                # abspath always removes a trailing slash, except when
                # root is '/'. This is an unusual case, but several projects
                # have independently discovered this technique to disable
                # Tornado's path validation and (hopefully) do their own,
                # so we need to support it.
                root += os.path.sep
            # The trailing slash also needs to be temporarily added back
            # the requested path so a request to root/ will match.
            if not (absolute_path + os.path.sep).startswith(root):
                raise HTTPError(403, "%s is not in root static directory", self.path)
            if os.path.isdir(absolute_path) and self.default_filename is not None:
                # need to look at the request.path here for when path is empty
                # but there is some prefix to the path that was already
                # trimmed by the routing
                if not self.request.path.endswith("/"):
                    self.redirect(self.request.path + "/", permanent=True)
                    return None
                absolute_path = os.path.join(absolute_path, self.default_filename)
            if not os.path.exists(absolute_path):
                raise HTTPError(404)
            if not os.path.isfile(absolute_path):
                raise HTTPError(403, "%s is not a file", self.path)
            return absolute_path
    
        @classmethod
        def get_content(
            cls, abspath: str, start: Optional[int] = None, end: Optional[int] = None
        ) -> Generator[bytes, None, None]:
            """Retrieve the content of the requested resource which is located
            at the given absolute path.
    
            This class method may be overridden by subclasses.  Note that its
            signature is different from other overridable class methods
            (no ``settings`` argument); this is deliberate to ensure that
            ``abspath`` is able to stand on its own as a cache key.
    
            This method should either return a byte string or an iterator
            of byte strings.  The latter is preferred for large files
            as it helps reduce memory fragmentation.
    
            .. versionadded:: 3.1
            """
            with open(abspath, "rb") as file:
                if start is not None:
                    file.seek(start)
                if end is not None:
                    remaining = end - (start or 0)  # type: Optional[int]
                else:
                    remaining = None
                while True:
                    chunk_size = 64 * 1024
                    if remaining is not None and remaining < chunk_size:
                        chunk_size = remaining
                    chunk = file.read(chunk_size)
                    if chunk:
                        if remaining is not None:
                            remaining -= len(chunk)
                        yield chunk
                    else:
                        if remaining is not None:
                            assert remaining == 0
                        return
    
        @classmethod
        def get_content_version(cls, abspath: str) -> str:
            """Returns a version string for the resource at the given path.
    
            This class method may be overridden by subclasses.  The
            default implementation is a SHA-512 hash of the file's contents.
    
            .. versionadded:: 3.1
            """
            data = cls.get_content(abspath)
            hasher = hashlib.sha512()
            if isinstance(data, bytes):
                hasher.update(data)
            else:
                for chunk in data:
                    hasher.update(chunk)
            return hasher.hexdigest()
    
        def _stat(self) -> os.stat_result:
            assert self.absolute_path is not None
            if not hasattr(self, "_stat_result"):
                self._stat_result = os.stat(self.absolute_path)
            return self._stat_result
    
        def get_content_size(self) -> int:
            """Retrieve the total size of the resource at the given path.
    
            This method may be overridden by subclasses.
    
            .. versionadded:: 3.1
    
            .. versionchanged:: 4.0
               This method is now always called, instead of only when
               partial results are requested.
            """
            stat_result = self._stat()
            return stat_result.st_size
    
        def get_modified_time(self) -> Optional[datetime.datetime]:
            """Returns the time that ``self.absolute_path`` was last modified.
    
            May be overridden in subclasses.  Should return a `~datetime.datetime`
            object or None.
    
            .. versionadded:: 3.1
            """
            stat_result = self._stat()
            # NOTE: Historically, this used stat_result[stat.ST_MTIME],
            # which truncates the fractional portion of the timestamp. It
            # was changed from that form to stat_result.st_mtime to
            # satisfy mypy (which disallows the bracket operator), but the
            # latter form returns a float instead of an int. For
            # consistency with the past (and because we have a unit test
            # that relies on this), we truncate the float here, although
            # I'm not sure that's the right thing to do.
            modified = datetime.datetime.utcfromtimestamp(int(stat_result.st_mtime))
            return modified
    
        def get_content_type(self) -> str:
            """Returns the ``Content-Type`` header to be used for this request.
    
            .. versionadded:: 3.1
            """
            assert self.absolute_path is not None
            mime_type, encoding = mimetypes.guess_type(self.absolute_path)
            # per RFC 6713, use the appropriate type for a gzip compressed file
            if encoding == "gzip":
                return "application/gzip"
            # As of 2015-07-21 there is no bzip2 encoding defined at
            # http://www.iana.org/assignments/media-types/media-types.xhtml
            # So for that (and any other encoding), use octet-stream.
            elif encoding is not None:
                return "application/octet-stream"
            elif mime_type is not None:
                return mime_type
            # if mime_type not detected, use application/octet-stream
            else:
                return "application/octet-stream"
    
        def set_extra_headers(self, path: str) -> None:
            """For subclass to add extra headers to the response"""
            pass
    
        def get_cache_time(
            self, path: str, modified: Optional[datetime.datetime], mime_type: str
        ) -> int:
            """Override to customize cache control behavior.
    
            Return a positive number of seconds to make the result
            cacheable for that amount of time or 0 to mark resource as
            cacheable for an unspecified amount of time (subject to
            browser heuristics).
    
            By default returns a cache expiry of 10 years for resources
            requested with the ``v`` argument.
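
            An override sketch that additionally caches CSS for an hour
            (the policy is illustrative)::

                def get_cache_time(self, path, modified, mime_type):
                    if "v" in self.request.arguments:
                        return self.CACHE_MAX_AGE
                    return 3600 if mime_type == "text/css" else 0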
            """
            return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
    
        @classmethod
        def make_static_url(
            cls, settings: Dict[str, Any], path: str, include_version: bool = True
        ) -> str:
            """Constructs a versioned url for the given path.
    
            This method may be overridden in subclasses (but note that it
            is a class method rather than an instance method).  Subclasses
            are only required to implement the signature
            ``make_static_url(cls, settings, path)``; other keyword
            arguments may be passed through `~RequestHandler.static_url`
            but are not standard.
    
            ``settings`` is the `Application.settings` dictionary.  ``path``
            is the static path being requested.  The url returned should be
            relative to the current host.
    
            ``include_version`` determines whether the generated URL should
            include the query string containing the version hash of the
            file corresponding to the given ``path``.
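
            An override sketch that prefixes an external host (the CDN
            hostname is illustrative)::

                @classmethod
                def make_static_url(cls, settings, path, include_version=True):
                    url = super().make_static_url(settings, path, include_version)
                    return "https://cdn.example.com" + url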
    
            """
            url = settings.get("static_url_prefix", "/static/") + path
            if not include_version:
                return url
    
            version_hash = cls.get_version(settings, path)
            if not version_hash:
                return url
    
            return "%s?v=%s" % (url, version_hash)
    
        def parse_url_path(self, url_path: str) -> str:
            """Converts a static URL path into a filesystem path.
    
            ``url_path`` is the path component of the URL with
            ``static_url_prefix`` removed.  The return value should be a
            filesystem path relative to ``static_path``.
    
            This is the inverse of `make_static_url`.
            """
            if os.path.sep != "/":
                url_path = url_path.replace("/", os.path.sep)
            return url_path
    
        @classmethod
        def get_version(cls, settings: Dict[str, Any], path: str) -> Optional[str]:
            """Generate the version string to be used in static URLs.
    
            ``settings`` is the `Application.settings` dictionary and ``path``
            is the relative location of the requested asset on the filesystem.
            The returned value should be a string, or ``None`` if no version
            could be determined.
    
            .. versionchanged:: 3.1
               This method was previously recommended for subclasses to override;
               `get_content_version` is now preferred as it allows the base
               class to handle caching of the result.
            """
            abs_path = cls.get_absolute_path(settings["static_path"], path)
            return cls._get_cached_version(abs_path)
    
        @classmethod
        def _get_cached_version(cls, abs_path: str) -> Optional[str]:
            with cls._lock:
                hashes = cls._static_hashes
                if abs_path not in hashes:
                    try:
                        hashes[abs_path] = cls.get_content_version(abs_path)
                    except Exception:
                        gen_log.error("Could not open static file %r", abs_path)
                        hashes[abs_path] = None
                hsh = hashes.get(abs_path)
                if hsh:
                    return hsh
            return None
    
    
    class FallbackHandler(RequestHandler):
        """A `RequestHandler` that wraps another HTTP server callback.
    
        The fallback is a callable object that accepts an
        `~.httputil.HTTPServerRequest`, such as an `Application` or
        `tornado.wsgi.WSGIContainer`.  This is most useful for serving both
        Tornado ``RequestHandlers`` and WSGI apps in the same server.  Typical
        usage::
    
            wsgi_app = tornado.wsgi.WSGIContainer(
                django.core.handlers.wsgi.WSGIHandler())
            application = tornado.web.Application([
                (r"/foo", FooHandler),
                (r".*", FallbackHandler, dict(fallback=wsgi_app),
            ])
        """
    
        def initialize(
            self, fallback: Callable[[httputil.HTTPServerRequest], None]
        ) -> None:
            self.fallback = fallback
    
        def prepare(self) -> None:
            self.fallback(self.request)
            self._finished = True
            self.on_finish()
    
    
    class OutputTransform(object):
        """A transform modifies the result of an HTTP request (e.g., GZip encoding)
    
        Applications are not expected to create their own OutputTransforms
        or interact with them directly; the framework chooses which transforms
        (if any) to apply.
        """
    
        def __init__(self, request: httputil.HTTPServerRequest) -> None:
            pass
    
        def transform_first_chunk(
            self,
            status_code: int,
            headers: httputil.HTTPHeaders,
            chunk: bytes,
            finishing: bool,
        ) -> Tuple[int, httputil.HTTPHeaders, bytes]:
            return status_code, headers, chunk
    
        def transform_chunk(self, chunk: bytes, finishing: bool) -> bytes:
            return chunk
    
    
    class GZipContentEncoding(OutputTransform):
        """Applies the gzip content encoding to the response.
    
        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    
        .. versionchanged:: 4.0
            Now compresses all mime types beginning with ``text/``, instead
            of just a whitelist.  (The whitelist is still used for certain
            non-text mime types.)
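
        This transform is enabled by passing ``compress_response=True``
        (or the older ``gzip`` alias) in the `Application` settings::

            app = Application(handlers, compress_response=True)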
        """
    
        # Whitelist of compressible mime types (in addition to any types
        # beginning with "text/").
        CONTENT_TYPES = set(
            [
                "application/javascript",
                "application/x-javascript",
                "application/xml",
                "application/atom+xml",
                "application/json",
                "application/xhtml+xml",
                "image/svg+xml",
            ]
        )
        # Python's GzipFile defaults to level 9, while most other gzip
        # tools (including gzip itself) default to 6, which is probably a
        # better CPU/size tradeoff.
        GZIP_LEVEL = 6
        # Responses that are too short are unlikely to benefit from gzipping
        # after considering the "Content-Encoding: gzip" header and the header
        # inside the gzip encoding.
        # Note that responses written in multiple chunks will be compressed
        # regardless of size.
        MIN_LENGTH = 1024
    
        def __init__(self, request: httputil.HTTPServerRequest) -> None:
            self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    
        def _compressible_type(self, ctype: str) -> bool:
            return ctype.startswith("text/") or ctype in self.CONTENT_TYPES
    
        def transform_first_chunk(
            self,
            status_code: int,
            headers: httputil.HTTPHeaders,
            chunk: bytes,
            finishing: bool,
        ) -> Tuple[int, httputil.HTTPHeaders, bytes]:
            # TODO: can/should this type be inherited from the superclass?
            if "Vary" in headers:
                headers["Vary"] += ", Accept-Encoding"
            else:
                headers["Vary"] = "Accept-Encoding"
            if self._gzipping:
                ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
                self._gzipping = (
                    self._compressible_type(ctype)
                    and (not finishing or len(chunk) >= self.MIN_LENGTH)
                    and ("Content-Encoding" not in headers)
                )
            if self._gzipping:
                headers["Content-Encoding"] = "gzip"
                self._gzip_value = BytesIO()
                self._gzip_file = gzip.GzipFile(
                    mode="w", fileobj=self._gzip_value, compresslevel=self.GZIP_LEVEL
                )
                chunk = self.transform_chunk(chunk, finishing)
                if "Content-Length" in headers:
                    # The original content length is no longer correct.
                    # If this is the last (and only) chunk, we can set the new
                    # content-length; otherwise we remove it and fall back to
                    # chunked encoding.
                    if finishing:
                        headers["Content-Length"] = str(len(chunk))
                    else:
                        del headers["Content-Length"]
            return status_code, headers, chunk
    
        def transform_chunk(self, chunk: bytes, finishing: bool) -> bytes:
            if self._gzipping:
                self._gzip_file.write(chunk)
                if finishing:
                    self._gzip_file.close()
                else:
                    self._gzip_file.flush()
                chunk = self._gzip_value.getvalue()
                self._gzip_value.truncate(0)
                self._gzip_value.seek(0)
            return chunk
    
    
    def authenticated(
        method: Callable[..., Optional[Awaitable[None]]]
    ) -> Callable[..., Optional[Awaitable[None]]]:
        """Decorate methods with this to require that the user be logged in.
    
        If the user is not logged in, they will be redirected to the configured
        `login url <RequestHandler.get_login_url>`.
    
        If you configure a login url with a query parameter, Tornado will
        assume you know what you're doing and use it as-is.  If not, it
        will add a `next` parameter so the login page knows where to send
        you once you're logged in.
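
        Usage sketch (``ProfileHandler`` is illustrative)::

            class ProfileHandler(RequestHandler):
                @authenticated
                def get(self):
                    self.write("Hello, %s" % self.current_user)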
        """
    
        @functools.wraps(method)
        def wrapper(  # type: ignore
            self: RequestHandler, *args, **kwargs
        ) -> Optional[Awaitable[None]]:
            if not self.current_user:
                if self.request.method in ("GET", "HEAD"):
                    url = self.get_login_url()
                    if "?" not in url:
                        if urllib.parse.urlsplit(url).scheme:
                            # if login url is absolute, make next absolute too
                            next_url = self.request.full_url()
                        else:
                            assert self.request.uri is not None
                            next_url = self.request.uri
                        url += "?" + urlencode(dict(next=next_url))
                    self.redirect(url)
                    return None
                raise HTTPError(403)
            return method(self, *args, **kwargs)
    
        return wrapper
    
    
    class UIModule(object):
        """A re-usable, modular UI unit on a page.
    
        UI modules often execute additional queries, and they can include
        additional CSS and JavaScript that will be automatically inserted
        into the output page when it is rendered.
    
        Subclasses of UIModule must override the `render` method.
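
        A minimal sketch (the template name is illustrative)::

            class Entry(UIModule):
                def render(self, entry):
                    return self.render_string("module-entry.html", entry=entry)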
        """
    
        def __init__(self, handler: RequestHandler) -> None:
            self.handler = handler
            self.request = handler.request
            self.ui = handler.ui
            self.locale = handler.locale
    
        @property
        def current_user(self) -> Any:
            return self.handler.current_user
    
        def render(self, *args: Any, **kwargs: Any) -> str:
            """Override in subclasses to return this module's output."""
            raise NotImplementedError()
    
        def embedded_javascript(self) -> Optional[str]:
            """Override to return a JavaScript string
            to be embedded in the page."""
            return None
    
        def javascript_files(self) -> Optional[Iterable[str]]:
            """Override to return a list of JavaScript files needed by this module.
    
            If the return values are relative paths, they will be passed to
            `RequestHandler.static_url`; otherwise they will be used as-is.
            """
            return None
    
        def embedded_css(self) -> Optional[str]:
            """Override to return a CSS string
            that will be embedded in the page."""
            return None
    
        def css_files(self) -> Optional[Iterable[str]]:
            """Override to returns a list of CSS files required by this module.
    
            If the return values are relative paths, they will be passed to
            `RequestHandler.static_url`; otherwise they will be used as-is.
            """
            return None
    
        def html_head(self) -> Optional[str]:
            """Override to return an HTML string that will be put in the 
            element.
            """
            return None
    
        def html_body(self) -> Optional[str]:
            """Override to return an HTML string that will be put at the end of
            the <body/> element.
            """
            return None
    
        def render_string(self, path: str, **kwargs: Any) -> bytes:
            """Renders a template and returns it as a string."""
            return self.handler.render_string(path, **kwargs)
    
    
    class _linkify(UIModule):
        def render(self, text: str, **kwargs: Any) -> str:  # type: ignore
            return escape.linkify(text, **kwargs)
    
    
    class _xsrf_form_html(UIModule):
        def render(self) -> str:  # type: ignore
            return self.handler.xsrf_form_html()
    
    
    class TemplateModule(UIModule):
        """UIModule that simply renders the given template.
    
        {% module Template("foo.html") %} is similar to {% include "foo.html" %},
        but the module version gets its own namespace (with kwargs passed to
        Template()) instead of inheriting the outer template's namespace.
    
        Templates rendered through this module also get access to UIModule's
        automatic JavaScript/CSS features.  Simply call set_resources
        inside the template and give it keyword arguments corresponding to
        the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
        Note that these resources are output once per template file, not once
        per instantiation of the template, so they must not depend on
        any arguments to the template.
        """
    
        def __init__(self, handler: RequestHandler) -> None:
            super().__init__(handler)
            # keep resources in both a list and a dict to preserve order
            self._resource_list = []  # type: List[Dict[str, Any]]
            self._resource_dict = {}  # type: Dict[str, Dict[str, Any]]
    
        def render(self, path: str, **kwargs: Any) -> bytes:  # type: ignore
            def set_resources(**kwargs) -> str:  # type: ignore
                if path not in self._resource_dict:
                    self._resource_list.append(kwargs)
                    self._resource_dict[path] = kwargs
                else:
                    if self._resource_dict[path] != kwargs:
                        raise ValueError(
                            "set_resources called with different "
                            "resources for the same template"
                        )
                return ""
    
            return self.render_string(path, set_resources=set_resources, **kwargs)
    
        def _get_resources(self, key: str) -> Iterable[str]:
            return (r[key] for r in self._resource_list if key in r)
    
        def embedded_javascript(self) -> str:
            return "\n".join(self._get_resources("embedded_javascript"))
    
        def javascript_files(self) -> Iterable[str]:
            result = []
            for f in self._get_resources("javascript_files"):
                if isinstance(f, (unicode_type, bytes)):
                    result.append(f)
                else:
                    result.extend(f)
            return result
    
        def embedded_css(self) -> str:
            return "\n".join(self._get_resources("embedded_css"))
    
        def css_files(self) -> Iterable[str]:
            result = []
            for f in self._get_resources("css_files"):
                if isinstance(f, (unicode_type, bytes)):
                    result.append(f)
                else:
                    result.extend(f)
            return result
    
        def html_head(self) -> str:
            return "".join(self._get_resources("html_head"))
    
        def html_body(self) -> str:
            return "".join(self._get_resources("html_body"))
    
    
    class _UIModuleNamespace(object):
        """Lazy namespace which creates UIModule proxies bound to a handler."""
    
        def __init__(
            self, handler: RequestHandler, ui_modules: Dict[str, Type[UIModule]]
        ) -> None:
            self.handler = handler
            self.ui_modules = ui_modules
    
        def __getitem__(self, key: str) -> Callable[..., str]:
            return self.handler._ui_module(key, self.ui_modules[key])
    
        def __getattr__(self, key: str) -> Callable[..., str]:
            try:
                return self[key]
            except KeyError as e:
                raise AttributeError(str(e))
    
    
    def create_signed_value(
        secret: _CookieSecretTypes,
        name: str,
        value: Union[str, bytes],
        version: Optional[int] = None,
        clock: Optional[Callable[[], float]] = None,
        key_version: Optional[int] = None,
    ) -> bytes:
        if version is None:
            version = DEFAULT_SIGNED_VALUE_VERSION
        if clock is None:
            clock = time.time
    
        timestamp = utf8(str(int(clock())))
        value = base64.b64encode(utf8(value))
        if version == 1:
            assert not isinstance(secret, dict)
            signature = _create_signature_v1(secret, name, value, timestamp)
            value = b"|".join([value, timestamp, signature])
            return value
        elif version == 2:
            # The v2 format consists of a version number and a series of
            # length-prefixed fields "%d:%s", the last of which is a
            # signature, all separated by pipes.  All numbers are in
            # decimal format with no leading zeros.  The signature is an
            # HMAC-SHA256 of the whole string up to that point, including
            # the final pipe.
            #
            # The fields are:
            # - format version (i.e. 2; no length prefix)
            # - key version (integer, default is 0)
            # - timestamp (integer seconds since epoch)
            # - name (not encoded; assumed to be ~alphanumeric)
            # - value (base64-encoded)
            # - signature (hex-encoded; no length prefix)
            def format_field(s: Union[str, bytes]) -> bytes:
                return utf8("%d:" % len(s)) + utf8(s)
    
            to_sign = b"|".join(
                [
                    b"2",
                    format_field(str(key_version or 0)),
                    format_field(timestamp),
                    format_field(name),
                    format_field(value),
                    b"",
                ]
            )
    
            if isinstance(secret, dict):
                assert (
                    key_version is not None
                ), "Key version must be set when sign key dict is used"
                assert version >= 2, "Version must be at least 2 for key version support"
                secret = secret[key_version]
    
            signature = _create_signature_v2(secret, to_sign)
            return to_sign + signature
        else:
            raise ValueError("Unsupported version %d" % version)
    
    
    # A leading version number in decimal
    # with no leading zeros, followed by a pipe.
    _signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
    
    
    def _get_version(value: bytes) -> int:
        # Figures out what version value is.  Version 1 did not include an
        # explicit version field and started with arbitrary base64 data,
        # which makes this tricky.
        m = _signed_value_version_re.match(value)
        if m is None:
            version = 1
        else:
            try:
                version = int(m.group(1))
                if version > 999:
                    # Certain payloads from the version-less v1 format may
                    # be parsed as valid integers.  Due to base64 padding
                    # restrictions, this can only happen for numbers whose
                    # length is a multiple of 4, so we can treat all
                    # numbers up to 999 as versions, and for the rest we
                    # fall back to v1 format.
                    version = 1
            except ValueError:
                version = 1
        return version
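

    def _demo_get_version() -> None:
        # A minimal sketch, not part of the library: v2 values carry an
        # explicit "2|" prefix, while v1 values start with arbitrary
        # base64 data and therefore fall back to version 1.
        assert _get_version(b"2|1:0|10:1600000000|4:name|8:dmFsdWU=|") == 2
        assert _get_version(b"dmFsdWU=|1600000000|abcdef") == 1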
    
    
    def decode_signed_value(
        secret: _CookieSecretTypes,
        name: str,
        value: Union[None, str, bytes],
        max_age_days: float = 31,
        clock: Optional[Callable[[], float]] = None,
        min_version: Optional[int] = None,
    ) -> Optional[bytes]:
        if clock is None:
            clock = time.time
        if min_version is None:
            min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
        if min_version > 2:
            raise ValueError("Unsupported min_version %d" % min_version)
        if not value:
            return None
    
        value = utf8(value)
        version = _get_version(value)
    
        if version < min_version:
            return None
        if version == 1:
            assert not isinstance(secret, dict)
            return _decode_signed_value_v1(secret, name, value, max_age_days, clock)
        elif version == 2:
            return _decode_signed_value_v2(secret, name, value, max_age_days, clock)
        else:
            return None
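

    def _demo_signed_value_roundtrip() -> None:
        # A minimal sketch, not part of the library: sign and decode a
        # value, including the keyed-secret (key rotation) form.  All
        # secrets here are made up.
        signed = create_signed_value("s3cret", "user", "alice", version=2)
        assert decode_signed_value("s3cret", "user", signed) == b"alice"
        # With a dict of secrets, key_version selects the signing key and
        # is recoverable from the value via get_signature_key_version.
        keys = {0: "old-secret", 1: "new-secret"}
        signed = create_signed_value(keys, "user", "alice", version=2, key_version=1)
        assert decode_signed_value(keys, "user", signed) == b"alice"
        assert get_signature_key_version(signed) == 1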
    
    
    def _decode_signed_value_v1(
        secret: Union[str, bytes],
        name: str,
        value: bytes,
        max_age_days: float,
        clock: Callable[[], float],
    ) -> Optional[bytes]:
        parts = utf8(value).split(b"|")
        if len(parts) != 3:
            return None
        signature = _create_signature_v1(secret, name, parts[0], parts[1])
        if not hmac.compare_digest(parts[2], signature):
            gen_log.warning("Invalid cookie signature %r", value)
            return None
        timestamp = int(parts[1])
        if timestamp < clock() - max_age_days * 86400:
            gen_log.warning("Expired cookie %r", value)
            return None
        if timestamp > clock() + 31 * 86400:
            # _cookie_signature does not hash a delimiter between the
            # parts of the cookie, so an attacker could transfer trailing
            # digits from the payload to the timestamp without altering the
            # signature.  For backwards compatibility, sanity-check timestamp
            # here instead of modifying _cookie_signature.
            gen_log.warning("Cookie timestamp in future; possible tampering %r", value)
            return None
        if parts[1].startswith(b"0"):
            gen_log.warning("Tampered cookie %r", value)
            return None
        try:
            return base64.b64decode(parts[0])
        except Exception:
            return None
    
    
    def _decode_fields_v2(value: bytes) -> Tuple[int, bytes, bytes, bytes, bytes]:
        def _consume_field(s: bytes) -> Tuple[bytes, bytes]:
            length, _, rest = s.partition(b":")
            n = int(length)
            field_value = rest[:n]
            # In python 3, indexing bytes returns small integers; we must
            # use a slice to get a byte string as in python 2.
            if rest[n : n + 1] != b"|":
                raise ValueError("malformed v2 signed value field")
            rest = rest[n + 1 :]
            return field_value, rest
    
        rest = value[2:]  # remove version number
        key_version, rest = _consume_field(rest)
        timestamp, rest = _consume_field(rest)
        name_field, rest = _consume_field(rest)
        value_field, passed_sig = _consume_field(rest)
        return int(key_version), timestamp, name_field, value_field, passed_sig
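

    def _demo_decode_fields_v2() -> None:
        # A minimal sketch, not part of the library: split a v2 value into
        # its length-prefixed fields.  The trailing signature ("deadbeef"
        # here, a placeholder) is returned unparsed.
        raw = b"2|1:0|10:1600000000|4:name|8:dmFsdWU=|deadbeef"
        key_version, timestamp, name, value, sig = _decode_fields_v2(raw)
        assert (key_version, timestamp) == (0, b"1600000000")
        assert (name, value, sig) == (b"name", b"dmFsdWU=", b"deadbeef")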
    
    
    def _decode_signed_value_v2(
        secret: _CookieSecretTypes,
        name: str,
        value: bytes,
        max_age_days: float,
        clock: Callable[[], float],
    ) -> Optional[bytes]:
        try:
            (
                key_version,
                timestamp_bytes,
                name_field,
                value_field,
                passed_sig,
            ) = _decode_fields_v2(value)
        except ValueError:
            return None
        signed_string = value[: -len(passed_sig)]
    
        if isinstance(secret, dict):
            try:
                secret = secret[key_version]
            except KeyError:
                return None
    
        expected_sig = _create_signature_v2(secret, signed_string)
        if not hmac.compare_digest(passed_sig, expected_sig):
            return None
        if name_field != utf8(name):
            return None
        timestamp = int(timestamp_bytes)
        if timestamp < clock() - max_age_days * 86400:
            # The signature has expired.
            return None
        try:
            return base64.b64decode(value_field)
        except Exception:
            return None
    
    
    def get_signature_key_version(value: Union[str, bytes]) -> Optional[int]:
        value = utf8(value)
        version = _get_version(value)
        if version < 2:
            return None
        try:
            key_version, _, _, _, _ = _decode_fields_v2(value)
        except ValueError:
            return None
    
        return key_version
    
    
    def _create_signature_v1(secret: Union[str, bytes], *parts: Union[str, bytes]) -> bytes:
        hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
        for part in parts:
            hash.update(utf8(part))
        return utf8(hash.hexdigest())
    
    
    def _create_signature_v2(secret: Union[str, bytes], s: bytes) -> bytes:
        hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
        hash.update(utf8(s))
        return utf8(hash.hexdigest())
    
    
    def is_absolute(path: str) -> bool:
        return any(path.startswith(x) for x in ["/", "http:", "https:"])
tornado-6.1.0/tornado/websocket.py
    """Implementation of the WebSocket protocol.
    
    `WebSockets <https://en.wikipedia.org/wiki/WebSocket>`_ allow for bidirectional
    communication between the browser and server.
    
    WebSockets are supported in the current versions of all major browsers,
    although older versions that do not support WebSockets are still in use
    (refer to http://caniuse.com/websockets for details).
    
    This module implements the final version of the WebSocket protocol as
    defined in `RFC 6455 <https://tools.ietf.org/html/rfc6455>`_.  Certain
    browser versions (notably Safari 5.x) implemented an earlier draft of
    the protocol (known as "draft 76") and are not compatible with this module.
    
    .. versionchanged:: 4.0
       Removed support for the draft 76 protocol version.
    """
    
    import abc
    import asyncio
    import base64
    import hashlib
    import os
    import sys
    import struct
    import tornado.escape
    import tornado.web
    from urllib.parse import urlparse
    import zlib
    
    from tornado.concurrent import Future, future_set_result_unless_cancelled
    from tornado.escape import utf8, native_str, to_unicode
    from tornado import gen, httpclient, httputil
    from tornado.ioloop import IOLoop, PeriodicCallback
    from tornado.iostream import StreamClosedError, IOStream
    from tornado.log import gen_log, app_log
    from tornado import simple_httpclient
    from tornado.queues import Queue
    from tornado.tcpclient import TCPClient
    from tornado.util import _websocket_mask
    
    from typing import (
        TYPE_CHECKING,
        cast,
        Any,
        Optional,
        Dict,
        Union,
        List,
        Awaitable,
        Callable,
        Tuple,
        Type,
    )
    from types import TracebackType
    
    if TYPE_CHECKING:
        from typing_extensions import Protocol
    
        # The zlib compressor types aren't actually exposed anywhere
        # publicly, so declare protocols for the portions we use.
        class _Compressor(Protocol):
            def compress(self, data: bytes) -> bytes:
                pass
    
            def flush(self, mode: int) -> bytes:
                pass
    
        class _Decompressor(Protocol):
            unconsumed_tail = b""  # type: bytes
    
            def decompress(self, data: bytes, max_length: int) -> bytes:
                pass
    
        class _WebSocketDelegate(Protocol):
            # The common base interface implemented by WebSocketHandler on
            # the server side and WebSocketClientConnection on the client
            # side.
            def on_ws_connection_close(
                self, close_code: Optional[int] = None, close_reason: Optional[str] = None
            ) -> None:
                pass
    
            def on_message(self, message: Union[str, bytes]) -> Optional["Awaitable[None]"]:
                pass
    
            def on_ping(self, data: bytes) -> None:
                pass
    
            def on_pong(self, data: bytes) -> None:
                pass
    
            def log_exception(
                self,
                typ: Optional[Type[BaseException]],
                value: Optional[BaseException],
                tb: Optional[TracebackType],
            ) -> None:
                pass
    
    
    _default_max_message_size = 10 * 1024 * 1024
    
    
    class WebSocketError(Exception):
        pass
    
    
    class WebSocketClosedError(WebSocketError):
        """Raised by operations on a closed connection.
    
        .. versionadded:: 3.2
        """
    
        pass
    
    
    class _DecompressTooLargeError(Exception):
        pass
    
    
    class _WebSocketParams(object):
        def __init__(
            self,
            ping_interval: Optional[float] = None,
            ping_timeout: Optional[float] = None,
            max_message_size: int = _default_max_message_size,
            compression_options: Optional[Dict[str, Any]] = None,
        ) -> None:
            self.ping_interval = ping_interval
            self.ping_timeout = ping_timeout
            self.max_message_size = max_message_size
            self.compression_options = compression_options
    
    
    class WebSocketHandler(tornado.web.RequestHandler):
        """Subclass this class to create a basic WebSocket handler.
    
        Override `on_message` to handle incoming messages, and use
        `write_message` to send messages to the client. You can also
        override `open` and `on_close` to handle opened and closed
        connections.
    
        Custom upgrade response headers can be sent by overriding
        `~tornado.web.RequestHandler.set_default_headers` or
        `~tornado.web.RequestHandler.prepare`.
    
        See http://dev.w3.org/html5/websockets/ for details on the
        JavaScript interface.  The protocol is specified at
        http://tools.ietf.org/html/rfc6455.
    
        Here is an example WebSocket handler that echoes all received
        messages back to the client:
    
        .. testcode::
    
          class EchoWebSocket(tornado.websocket.WebSocketHandler):
              def open(self):
                  print("WebSocket opened")
    
              def on_message(self, message):
                  self.write_message(u"You said: " + message)
    
              def on_close(self):
                  print("WebSocket closed")
    
        .. testoutput::
           :hide:
    
        WebSockets are not standard HTTP connections. The "handshake" is
        HTTP, but after the handshake, the protocol is
        message-based. Consequently, most of the Tornado HTTP facilities
        are not available in handlers of this type. The only communication
        methods available to you are `write_message()`, `ping()`, and
        `close()`. Likewise, your request handler class should implement the
        `open()` method rather than ``get()`` or ``post()``.
    
        If you map the handler above to ``/websocket`` in your application, you can
        invoke it in JavaScript with::
    
          var ws = new WebSocket("ws://localhost:8888/websocket");
          ws.onopen = function() {
             ws.send("Hello, world");
          };
          ws.onmessage = function (evt) {
             alert(evt.data);
          };
    
        This script pops up an alert box that says "You said: Hello, world".
    
        Web browsers allow any site to open a websocket connection to any other,
        instead of using the same-origin policy that governs other network
        access from JavaScript.  This can be surprising and is a potential
        security hole, so since Tornado 4.0 `WebSocketHandler` requires
        applications that wish to receive cross-origin websockets to opt in
        by overriding the `~WebSocketHandler.check_origin` method (see that
        method's docs for details).  Failure to do so is the most likely
        cause of 403 errors when making a websocket connection.
    
        When using a secure websocket connection (``wss://``) with a self-signed
        certificate, the connection from a browser may fail because it wants
        to show the "accept this certificate" dialog but has nowhere to show it.
        You must first visit a regular HTML page using the same certificate
        to accept it before the websocket connection will succeed.
    
        If the application setting ``websocket_ping_interval`` has a non-zero
        value, a ping will be sent periodically, and the connection will be
        closed if a response is not received before the ``websocket_ping_timeout``.
    
        Messages larger than the ``websocket_max_message_size`` application setting
        (default 10MiB) will not be accepted.
    
        .. versionchanged:: 4.5
           Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
           ``websocket_max_message_size``.
        """
    
        def __init__(
            self,
            application: tornado.web.Application,
            request: httputil.HTTPServerRequest,
            **kwargs: Any
        ) -> None:
            super().__init__(application, request, **kwargs)
            self.ws_connection = None  # type: Optional[WebSocketProtocol]
            self.close_code = None  # type: Optional[int]
            self.close_reason = None  # type: Optional[str]
            self.stream = None  # type: Optional[IOStream]
            self._on_close_called = False
    
        async def get(self, *args: Any, **kwargs: Any) -> None:
            self.open_args = args
            self.open_kwargs = kwargs
    
            # Upgrade header should be present and should be equal to WebSocket
            if self.request.headers.get("Upgrade", "").lower() != "websocket":
                self.set_status(400)
                log_msg = 'Can "Upgrade" only to "WebSocket".'
                self.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            # Connection header should be upgrade.
            # Some proxy servers/load balancers
            # might mess with it.
            headers = self.request.headers
            connection = map(
                lambda s: s.strip().lower(), headers.get("Connection", "").split(",")
            )
            if "upgrade" not in connection:
                self.set_status(400)
                log_msg = '"Connection" must be "Upgrade".'
                self.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            # Handle WebSocket Origin naming convention differences
            # The difference between version 8 and 13 is that in 8 the
            # client sends a "Sec-Websocket-Origin" header and in 13 it's
            # simply "Origin".
            if "Origin" in self.request.headers:
                origin = self.request.headers.get("Origin")
            else:
                origin = self.request.headers.get("Sec-Websocket-Origin", None)
    
            # If there was an origin header, check to make sure it matches
            # according to check_origin. When the origin is None, we assume it
            # did not come from a browser and that it can be passed on.
            if origin is not None and not self.check_origin(origin):
                self.set_status(403)
                log_msg = "Cross origin websockets not allowed"
                self.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            self.ws_connection = self.get_websocket_protocol()
            if self.ws_connection:
                await self.ws_connection.accept_connection(self)
            else:
                self.set_status(426, "Upgrade Required")
                self.set_header("Sec-WebSocket-Version", "7, 8, 13")
    
        @property
        def ping_interval(self) -> Optional[float]:
            """The interval for websocket keep-alive pings.
    
            Set websocket_ping_interval = 0 to disable pings.
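
            A sketch of configuring this through application settings
            (``handlers`` and the values are illustrative)::

                app = tornado.web.Application(
                    handlers,
                    websocket_ping_interval=10,
                    websocket_ping_timeout=30,
                )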
            """
            return self.settings.get("websocket_ping_interval", None)
    
        @property
        def ping_timeout(self) -> Optional[float]:
            """If no ping is received in this many seconds,
            close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
            Default is max of 3 pings or 30 seconds.
            """
            return self.settings.get("websocket_ping_timeout", None)
    
        @property
        def max_message_size(self) -> int:
            """Maximum allowed message size.
    
            If the remote peer sends a message larger than this, the connection
            will be closed.
    
            Default is 10MiB.
            """
            return self.settings.get(
                "websocket_max_message_size", _default_max_message_size
            )
    
        def write_message(
            self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False
        ) -> "Future[None]":
            """Sends the given message to the client of this Web Socket.
    
            The message may be either a string or a dict (which will be
            encoded as json).  If the ``binary`` argument is false, the
            message will be sent as utf8; in binary mode any byte string
            is allowed.
    
            If the connection is already closed, raises `WebSocketClosedError`.
            Returns a `.Future` which can be used for flow control.
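
            Awaiting the returned `.Future` applies backpressure; a minimal
            sketch (the echo handler shown is illustrative)::

                async def on_message(self, message):
                    await self.write_message({"echo": message})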
    
            .. versionchanged:: 3.2
               `WebSocketClosedError` was added (previously a closed connection
               would raise an `AttributeError`)
    
            .. versionchanged:: 4.3
               Returns a `.Future` which can be used for flow control.
    
            .. versionchanged:: 5.0
               Consistently raises `WebSocketClosedError`. Previously could
               sometimes raise `.StreamClosedError`.
            """
            if self.ws_connection is None or self.ws_connection.is_closing():
                raise WebSocketClosedError()
            if isinstance(message, dict):
                message = tornado.escape.json_encode(message)
            return self.ws_connection.write_message(message, binary=binary)
    
        def select_subprotocol(self, subprotocols: List[str]) -> Optional[str]:
            """Override to implement subprotocol negotiation.
    
            ``subprotocols`` is a list of strings identifying the
            subprotocols proposed by the client.  This method may be
            overridden to return one of those strings to select it, or
            ``None`` to not select a subprotocol.
    
            Failure to select a subprotocol does not automatically abort
            the connection, although clients may close the connection if
            none of their proposed subprotocols was selected.
    
            The list may be empty, in which case this method must return
            None. This method is always called exactly once even if no
            subprotocols were proposed so that the handler can be advised
            of this fact.
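
            A sketch of an override that prefers a hypothetical "chat.v2"
            subprotocol when the client offers it::

                def select_subprotocol(self, subprotocols):
                    if "chat.v2" in subprotocols:
                        return "chat.v2"
                    return None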
    
            .. versionchanged:: 5.1
    
               Previously, this method was called with a list containing
               an empty string instead of an empty list if no subprotocols
               were proposed by the client.
            """
            return None
    
        @property
        def selected_subprotocol(self) -> Optional[str]:
            """The subprotocol returned by `select_subprotocol`.
    
            .. versionadded:: 5.1
            """
            assert self.ws_connection is not None
            return self.ws_connection.selected_subprotocol
    
        def get_compression_options(self) -> Optional[Dict[str, Any]]:
            """Override to return compression options for the connection.
    
            If this method returns None (the default), compression will
            be disabled.  If it returns a dict (even an empty one), it
            will be enabled.  The contents of the dict may be used to
            control the following compression options:
    
            ``compression_level`` specifies the compression level.
    
            ``mem_level`` specifies the amount of memory used for the internal compression state.
    
            These parameters are documented in detail here:
            https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
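
            A sketch of an override that enables compression with mid-range
            settings (the values are illustrative)::

                def get_compression_options(self):
                    return {"compression_level": 6, "mem_level": 5}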
    
            .. versionadded:: 4.1
    
            .. versionchanged:: 4.5
    
               Added ``compression_level`` and ``mem_level``.
            """
            # TODO: Add wbits option.
            return None
    
        def open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]:
            """Invoked when a new WebSocket is opened.
    
            The arguments to `open` are extracted from the `tornado.web.URLSpec`
            regular expression, just like the arguments to
            `tornado.web.RequestHandler.get`.
    
            `open` may be a coroutine. `on_message` will not be called until
            `open` has returned.
    
            .. versionchanged:: 5.1
    
               ``open`` may be a coroutine.
            """
            pass
    
        def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
            """Handle incoming messages on the WebSocket
    
            This method must be overridden.
    
            .. versionchanged:: 4.5
    
               ``on_message`` can be a coroutine.
            """
            raise NotImplementedError
    
        def ping(self, data: Union[str, bytes] = b"") -> None:
            """Send ping frame to the remote end.
    
            The data argument allows a small amount of data (up to 125
            bytes) to be sent as a part of the ping message. Note that not
            all websocket implementations expose this data to
            applications.
    
            Consider using the ``websocket_ping_interval`` application
            setting instead of sending pings manually.
    
            .. versionchanged:: 5.1
    
               The data argument is now optional.
    
            """
            data = utf8(data)
            if self.ws_connection is None or self.ws_connection.is_closing():
                raise WebSocketClosedError()
            self.ws_connection.write_ping(data)
    
        def on_pong(self, data: bytes) -> None:
            """Invoked when the response to a ping frame is received."""
            pass
    
        def on_ping(self, data: bytes) -> None:
            """Invoked when the a ping frame is received."""
            pass
    
        def on_close(self) -> None:
            """Invoked when the WebSocket is closed.
    
            If the connection was closed cleanly and a status code or reason
            phrase was supplied, these values will be available as the attributes
            ``self.close_code`` and ``self.close_reason``.
    
            .. versionchanged:: 4.0
    
               Added ``close_code`` and ``close_reason`` attributes.
            """
            pass
    
        def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
            """Closes this Web Socket.
    
            Once the close handshake is successful the socket will be closed.
    
            ``code`` may be a numeric status code, taken from the values
            defined in `RFC 6455 section 7.4.1
            <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
            ``reason`` may be a textual message about why the connection is
            closing.  These values are made available to the client, but are
            not otherwise interpreted by the websocket protocol.
    
            .. versionchanged:: 4.0
    
               Added the ``code`` and ``reason`` arguments.
            """
            if self.ws_connection:
                self.ws_connection.close(code, reason)
                self.ws_connection = None
    
        def check_origin(self, origin: str) -> bool:
            """Override to enable support for allowing alternate origins.
    
            The ``origin`` argument is the value of the ``Origin`` HTTP
            header, the url responsible for initiating this request.  This
            method is not called for clients that do not send this header;
            such requests are always allowed (because all browsers that
            implement WebSockets support this header, and non-browser
            clients do not have the same cross-site security concerns).
    
            Should return ``True`` to accept the request or ``False`` to
            reject it. By default, rejects all requests with an origin on
            a host other than this one.
    
            This is a security protection against cross site scripting attacks on
            browsers, since WebSockets are allowed to bypass the usual same-origin
            policies and don't use CORS headers.
    
            .. warning::
    
               This is an important security measure; don't disable it
               without understanding the security implications. In
               particular, if your authentication is cookie-based, you
               must either restrict the origins allowed by
               ``check_origin()`` or implement your own XSRF-like
               protection for websocket connections. See `these
               <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
               `articles
               <https://devcenter.heroku.com/articles/websocket-security>`_
               for more.
    
            To accept all cross-origin traffic (which was the default prior to
            Tornado 4.0), simply override this method to always return ``True``::
    
                def check_origin(self, origin):
                    return True
    
            To allow connections from any subdomain of your site, you might
            do something like::
    
                def check_origin(self, origin):
                    parsed_origin = urllib.parse.urlparse(origin)
                    return parsed_origin.netloc.endswith(".mydomain.com")
    
            .. versionadded:: 4.0
    
            """
            parsed_origin = urlparse(origin)
            origin = parsed_origin.netloc
            origin = origin.lower()
    
            host = self.request.headers.get("Host")
    
            # Check to see that origin matches host directly, including ports
            return origin == host
    
        def set_nodelay(self, value: bool) -> None:
            """Set the no-delay flag for this stream.
    
            By default, small messages may be delayed and/or combined to minimize
            the number of packets sent.  This can sometimes cause 200-500ms delays
            due to the interaction between Nagle's algorithm and TCP delayed
            ACKs.  To reduce this delay (at the expense of possibly increasing
            bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
            connection is established.
    
            See `.BaseIOStream.set_nodelay` for additional details.
    
            .. versionadded:: 3.1
            """
            assert self.ws_connection is not None
            self.ws_connection.set_nodelay(value)
    
        def on_connection_close(self) -> None:
            if self.ws_connection:
                self.ws_connection.on_connection_close()
                self.ws_connection = None
            if not self._on_close_called:
                self._on_close_called = True
                self.on_close()
                self._break_cycles()
    
        def on_ws_connection_close(
            self, close_code: Optional[int] = None, close_reason: Optional[str] = None
        ) -> None:
            self.close_code = close_code
            self.close_reason = close_reason
            self.on_connection_close()
    
        def _break_cycles(self) -> None:
            # WebSocketHandlers call finish() early, but we don't want to
            # break up reference cycles (which makes it impossible to call
            # self.render_string) until after we've really closed the
            # connection (if it was established in the first place,
            # indicated by status code 101).
            if self.get_status() != 101 or self._on_close_called:
                super()._break_cycles()
    
        def send_error(self, *args: Any, **kwargs: Any) -> None:
            if self.stream is None:
                super().send_error(*args, **kwargs)
            else:
                # If we get an uncaught exception during the handshake,
                # we have no choice but to abruptly close the connection.
                # TODO: for uncaught exceptions after the handshake,
                # we can close the connection more gracefully.
                self.stream.close()
    
        def get_websocket_protocol(self) -> Optional["WebSocketProtocol"]:
            websocket_version = self.request.headers.get("Sec-WebSocket-Version")
            if websocket_version in ("7", "8", "13"):
                params = _WebSocketParams(
                    ping_interval=self.ping_interval,
                    ping_timeout=self.ping_timeout,
                    max_message_size=self.max_message_size,
                    compression_options=self.get_compression_options(),
                )
                return WebSocketProtocol13(self, False, params)
            return None
    
        def _detach_stream(self) -> IOStream:
            # disable non-WS methods
            for method in [
                "write",
                "redirect",
                "set_header",
                "set_cookie",
                "set_status",
                "flush",
                "finish",
            ]:
                setattr(self, method, _raise_not_supported_for_websockets)
            return self.detach()
    
    
    def _raise_not_supported_for_websockets(*args: Any, **kwargs: Any) -> None:
        raise RuntimeError("Method not supported for Web Sockets")
    
    
    class WebSocketProtocol(abc.ABC):
        """Base class for WebSocket protocol versions.
        """
    
        def __init__(self, handler: "_WebSocketDelegate") -> None:
            self.handler = handler
            self.stream = None  # type: Optional[IOStream]
            self.client_terminated = False
            self.server_terminated = False
    
        def _run_callback(
            self, callback: Callable, *args: Any, **kwargs: Any
        ) -> "Optional[Future[Any]]":
            """Runs the given callback with exception handling.
    
            If the callback is a coroutine, returns its Future. On error, aborts the
            websocket connection and returns None.
            """
            try:
                result = callback(*args, **kwargs)
            except Exception:
                self.handler.log_exception(*sys.exc_info())
                self._abort()
                return None
            else:
                if result is not None:
                    result = gen.convert_yielded(result)
                    assert self.stream is not None
                    self.stream.io_loop.add_future(result, lambda f: f.result())
                return result
    
        def on_connection_close(self) -> None:
            self._abort()
    
        def _abort(self) -> None:
            """Instantly aborts the WebSocket connection by closing the socket"""
            self.client_terminated = True
            self.server_terminated = True
            if self.stream is not None:
                self.stream.close()  # forcibly tear down the connection
            self.close()  # let the subclass cleanup
    
        @abc.abstractmethod
        def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
            raise NotImplementedError()
    
        @abc.abstractmethod
        def is_closing(self) -> bool:
            raise NotImplementedError()
    
        @abc.abstractmethod
        async def accept_connection(self, handler: WebSocketHandler) -> None:
            raise NotImplementedError()
    
        @abc.abstractmethod
        def write_message(
            self, message: Union[str, bytes], binary: bool = False
        ) -> "Future[None]":
            raise NotImplementedError()
    
        @property
        @abc.abstractmethod
        def selected_subprotocol(self) -> Optional[str]:
            raise NotImplementedError()
    
        @abc.abstractmethod
        def write_ping(self, data: bytes) -> None:
            raise NotImplementedError()
    
        # The entry points below are used by WebSocketClientConnection,
        # which was introduced after we only supported a single version of
        # WebSocketProtocol. The WebSocketProtocol/WebSocketProtocol13
        # boundary is currently pretty ad-hoc.
        @abc.abstractmethod
        def _process_server_headers(
            self, key: Union[str, bytes], headers: httputil.HTTPHeaders
        ) -> None:
            raise NotImplementedError()
    
        @abc.abstractmethod
        def start_pinging(self) -> None:
            raise NotImplementedError()
    
        @abc.abstractmethod
        async def _receive_frame_loop(self) -> None:
            raise NotImplementedError()
    
        @abc.abstractmethod
        def set_nodelay(self, x: bool) -> None:
            raise NotImplementedError()
    
    
    class _PerMessageDeflateCompressor(object):
        def __init__(
            self,
            persistent: bool,
            max_wbits: Optional[int],
            compression_options: Optional[Dict[str, Any]] = None,
        ) -> None:
            if max_wbits is None:
                max_wbits = zlib.MAX_WBITS
            # There is no symbolic constant for the minimum wbits value.
            if not (8 <= max_wbits <= zlib.MAX_WBITS):
                raise ValueError(
                    "Invalid max_wbits value %r; allowed range 8-%d"
                    % (max_wbits, zlib.MAX_WBITS)
                )
            self._max_wbits = max_wbits
    
            if (
                compression_options is None
                or "compression_level" not in compression_options
            ):
                self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
            else:
                self._compression_level = compression_options["compression_level"]
    
            if compression_options is None or "mem_level" not in compression_options:
                self._mem_level = 8
            else:
                self._mem_level = compression_options["mem_level"]
    
            if persistent:
                self._compressor = self._create_compressor()  # type: Optional[_Compressor]
            else:
                self._compressor = None
    
        def _create_compressor(self) -> "_Compressor":
            return zlib.compressobj(
                self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level
            )
    
        def compress(self, data: bytes) -> bytes:
            compressor = self._compressor or self._create_compressor()
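            # Per the permessage-deflate extension (RFC 7692), each message
            # is flushed with Z_SYNC_FLUSH and the resulting trailing
            # b"\x00\x00\xff\xff" is stripped before transmission.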
            data = compressor.compress(data) + compressor.flush(zlib.Z_SYNC_FLUSH)
            assert data.endswith(b"\x00\x00\xff\xff")
            return data[:-4]
    
    
    class _PerMessageDeflateDecompressor(object):
        def __init__(
            self,
            persistent: bool,
            max_wbits: Optional[int],
            max_message_size: int,
            compression_options: Optional[Dict[str, Any]] = None,
        ) -> None:
            self._max_message_size = max_message_size
            if max_wbits is None:
                max_wbits = zlib.MAX_WBITS
            if not (8 <= max_wbits <= zlib.MAX_WBITS):
                raise ValueError(
                    "Invalid max_wbits value %r; allowed range 8-%d"
                    % (max_wbits, zlib.MAX_WBITS)
                )
            self._max_wbits = max_wbits
            if persistent:
                self._decompressor = (
                    self._create_decompressor()
                )  # type: Optional[_Decompressor]
            else:
                self._decompressor = None
    
        def _create_decompressor(self) -> "_Decompressor":
            return zlib.decompressobj(-self._max_wbits)
    
        def decompress(self, data: bytes) -> bytes:
            decompressor = self._decompressor or self._create_decompressor()
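            # Restore the 4 bytes stripped by the sender (see RFC 7692);
            # unconsumed input after max_length bytes means the message
            # exceeds max_message_size.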
            result = decompressor.decompress(
                data + b"\x00\x00\xff\xff", self._max_message_size
            )
            if decompressor.unconsumed_tail:
                raise _DecompressTooLargeError()
            return result
    
    
    class WebSocketProtocol13(WebSocketProtocol):
        """Implementation of the WebSocket protocol from RFC 6455.
    
        This class supports versions 7 and 8 of the protocol in addition to the
        final version 13.
        """
    
        # Bit masks for the first byte of a frame.
        FIN = 0x80
        RSV1 = 0x40
        RSV2 = 0x20
        RSV3 = 0x10
        RSV_MASK = RSV1 | RSV2 | RSV3
        OPCODE_MASK = 0x0F
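
        # For example, the first byte of an unfragmented, uncompressed
        # text frame is FIN | 0x1 == 0x81.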
    
        stream = None  # type: IOStream
    
        def __init__(
            self,
            handler: "_WebSocketDelegate",
            mask_outgoing: bool,
            params: _WebSocketParams,
        ) -> None:
            WebSocketProtocol.__init__(self, handler)
            self.mask_outgoing = mask_outgoing
            self.params = params
            self._final_frame = False
            self._frame_opcode = None
            self._masked_frame = None
            self._frame_mask = None  # type: Optional[bytes]
            self._frame_length = None
            self._fragmented_message_buffer = None  # type: Optional[bytes]
            self._fragmented_message_opcode = None
            self._waiting = None  # type: object
            self._compression_options = params.compression_options
            self._decompressor = None  # type: Optional[_PerMessageDeflateDecompressor]
            self._compressor = None  # type: Optional[_PerMessageDeflateCompressor]
            self._frame_compressed = None  # type: Optional[bool]
            # The total uncompressed size of all messages received or sent.
            # Unicode messages are encoded to utf8.
            # Only for testing; subject to change.
            self._message_bytes_in = 0
            self._message_bytes_out = 0
            # The total size of all packets received or sent.  Includes
            # the effect of compression, frame overhead, and control frames.
            self._wire_bytes_in = 0
            self._wire_bytes_out = 0
            self.ping_callback = None  # type: Optional[PeriodicCallback]
            self.last_ping = 0.0
            self.last_pong = 0.0
            self.close_code = None  # type: Optional[int]
            self.close_reason = None  # type: Optional[str]
    
        # Use a property for this to satisfy the abc.
        @property
        def selected_subprotocol(self) -> Optional[str]:
            return self._selected_subprotocol
    
        @selected_subprotocol.setter
        def selected_subprotocol(self, value: Optional[str]) -> None:
            self._selected_subprotocol = value
    
        async def accept_connection(self, handler: WebSocketHandler) -> None:
            try:
                self._handle_websocket_headers(handler)
            except ValueError:
                handler.set_status(400)
                log_msg = "Missing/Invalid WebSocket headers"
                handler.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            try:
                await self._accept_connection(handler)
            except asyncio.CancelledError:
                self._abort()
                return
            except ValueError:
                gen_log.debug("Malformed WebSocket request received", exc_info=True)
                self._abort()
                return
    
        def _handle_websocket_headers(self, handler: WebSocketHandler) -> None:
            """Verifies all invariant- and required headers
    
            If a header is missing or have an incorrect value ValueError will be
            raised
            """
            fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
            if not all(map(lambda f: handler.request.headers.get(f), fields)):
                raise ValueError("Missing/Invalid WebSocket headers")
    
        @staticmethod
        def compute_accept_value(key: Union[str, bytes]) -> str:
            """Computes the value for the Sec-WebSocket-Accept header,
            given the value for Sec-WebSocket-Key.
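
            For example, the sample key from RFC 6455 yields::

                compute_accept_value("dGhlIHNhbXBsZSBub25jZQ==")
                # -> 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='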
            """
            sha1 = hashlib.sha1()
            sha1.update(utf8(key))
            sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
            return native_str(base64.b64encode(sha1.digest()))
    
        def _challenge_response(self, handler: WebSocketHandler) -> str:
            return WebSocketProtocol13.compute_accept_value(
                cast(str, handler.request.headers.get("Sec-Websocket-Key"))
            )
    
        async def _accept_connection(self, handler: WebSocketHandler) -> None:
            subprotocol_header = handler.request.headers.get("Sec-WebSocket-Protocol")
            if subprotocol_header:
                subprotocols = [s.strip() for s in subprotocol_header.split(",")]
            else:
                subprotocols = []
            self.selected_subprotocol = handler.select_subprotocol(subprotocols)
            if self.selected_subprotocol:
                assert self.selected_subprotocol in subprotocols
                handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol)
    
            extensions = self._parse_extensions_header(handler.request.headers)
            for ext in extensions:
                if ext[0] == "permessage-deflate" and self._compression_options is not None:
                    # TODO: negotiate parameters if compression_options
                    # specifies limits.
                    self._create_compressors("server", ext[1], self._compression_options)
                    if (
                        "client_max_window_bits" in ext[1]
                        and ext[1]["client_max_window_bits"] is None
                    ):
                        # Don't echo an offered client_max_window_bits
                        # parameter with no value.
                        del ext[1]["client_max_window_bits"]
                    handler.set_header(
                        "Sec-WebSocket-Extensions",
                        httputil._encode_header("permessage-deflate", ext[1]),
                    )
                    break
    
            handler.clear_header("Content-Type")
            handler.set_status(101)
            handler.set_header("Upgrade", "websocket")
            handler.set_header("Connection", "Upgrade")
            handler.set_header("Sec-WebSocket-Accept", self._challenge_response(handler))
            handler.finish()
    
            self.stream = handler._detach_stream()
    
            self.start_pinging()
            try:
                open_result = handler.open(*handler.open_args, **handler.open_kwargs)
                if open_result is not None:
                    await open_result
            except Exception:
                handler.log_exception(*sys.exc_info())
                self._abort()
                return
    
            await self._receive_frame_loop()
    
        def _parse_extensions_header(
            self, headers: httputil.HTTPHeaders
        ) -> List[Tuple[str, Dict[str, str]]]:
            extensions = headers.get("Sec-WebSocket-Extensions", "")
            if extensions:
                return [httputil._parse_header(e.strip()) for e in extensions.split(",")]
            return []
    
        def _process_server_headers(
            self, key: Union[str, bytes], headers: httputil.HTTPHeaders
        ) -> None:
            """Process the headers sent by the server to this client connection.
    
            'key' is the websocket handshake challenge/response key.
            """
            assert headers["Upgrade"].lower() == "websocket"
            assert headers["Connection"].lower() == "upgrade"
            accept = self.compute_accept_value(key)
            assert headers["Sec-Websocket-Accept"] == accept
    
            extensions = self._parse_extensions_header(headers)
            for ext in extensions:
                if ext[0] == "permessage-deflate" and self._compression_options is not None:
                    self._create_compressors("client", ext[1])
                else:
                    raise ValueError("unsupported extension %r", ext)
    
            self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None)
    
        def _get_compressor_options(
            self,
            side: str,
            agreed_parameters: Dict[str, Any],
            compression_options: Optional[Dict[str, Any]] = None,
        ) -> Dict[str, Any]:
            """Converts a websocket agreed_parameters set to keyword arguments
            for our compressor objects.
            """
            options = dict(
                persistent=(side + "_no_context_takeover") not in agreed_parameters
            )  # type: Dict[str, Any]
            wbits_header = agreed_parameters.get(side + "_max_window_bits", None)
            if wbits_header is None:
                options["max_wbits"] = zlib.MAX_WBITS
            else:
                options["max_wbits"] = int(wbits_header)
            options["compression_options"] = compression_options
            return options
    
        def _create_compressors(
            self,
            side: str,
            agreed_parameters: Dict[str, Any],
            compression_options: Optional[Dict[str, Any]] = None,
        ) -> None:
            # TODO: handle invalid parameters gracefully
            allowed_keys = set(
                [
                    "server_no_context_takeover",
                    "client_no_context_takeover",
                    "server_max_window_bits",
                    "client_max_window_bits",
                ]
            )
            for key in agreed_parameters:
                if key not in allowed_keys:
                    raise ValueError("unsupported compression parameter %r" % key)
            other_side = "client" if (side == "server") else "server"
            self._compressor = _PerMessageDeflateCompressor(
                **self._get_compressor_options(side, agreed_parameters, compression_options)
            )
            self._decompressor = _PerMessageDeflateDecompressor(
                max_message_size=self.params.max_message_size,
                **self._get_compressor_options(
                    other_side, agreed_parameters, compression_options
                )
            )
    
        def _write_frame(
            self, fin: bool, opcode: int, data: bytes, flags: int = 0
        ) -> "Future[None]":
            data_len = len(data)
            if opcode & 0x8:
                # All control frames MUST have a payload length of 125
                # bytes or less and MUST NOT be fragmented.
                if not fin:
                    raise ValueError("control frames may not be fragmented")
                if data_len > 125:
                    raise ValueError("control frame payloads may not exceed 125 bytes")
            if fin:
                finbit = self.FIN
            else:
                finbit = 0
            frame = struct.pack("B", finbit | opcode | flags)
            if self.mask_outgoing:
                mask_bit = 0x80
            else:
                mask_bit = 0
            if data_len < 126:
                frame += struct.pack("B", data_len | mask_bit)
            elif data_len <= 0xFFFF:
                frame += struct.pack("!BH", 126 | mask_bit, data_len)
            else:
                frame += struct.pack("!BQ", 127 | mask_bit, data_len)
            if self.mask_outgoing:
                mask = os.urandom(4)
                data = mask + _websocket_mask(mask, data)
            frame += data
            self._wire_bytes_out += len(frame)
            return self.stream.write(frame)
    
        def write_message(
            self, message: Union[str, bytes], binary: bool = False
        ) -> "Future[None]":
            """Sends the given message to the client of this Web Socket."""
            if binary:
                opcode = 0x2
            else:
                opcode = 0x1
            message = tornado.escape.utf8(message)
            assert isinstance(message, bytes)
            self._message_bytes_out += len(message)
            flags = 0
            if self._compressor:
                message = self._compressor.compress(message)
                flags |= self.RSV1
            # For historical reasons, write methods in Tornado operate in a semi-synchronous
            # mode in which awaiting the Future they return is optional (But errors can
            # still be raised). This requires us to go through an awkward dance here
            # to transform the errors that may be returned while presenting the same
            # semi-synchronous interface.
            try:
                fut = self._write_frame(True, opcode, message, flags=flags)
            except StreamClosedError:
                raise WebSocketClosedError()
    
            async def wrapper() -> None:
                try:
                    await fut
                except StreamClosedError:
                    raise WebSocketClosedError()
    
            return asyncio.ensure_future(wrapper())
    
        def write_ping(self, data: bytes) -> None:
            """Send ping frame."""
            assert isinstance(data, bytes)
            self._write_frame(True, 0x9, data)
    
        async def _receive_frame_loop(self) -> None:
            try:
                while not self.client_terminated:
                    await self._receive_frame()
            except StreamClosedError:
                self._abort()
            self.handler.on_ws_connection_close(self.close_code, self.close_reason)
    
        async def _read_bytes(self, n: int) -> bytes:
            data = await self.stream.read_bytes(n)
            self._wire_bytes_in += n
            return data
    
        async def _receive_frame(self) -> None:
            # Read the frame header.
            data = await self._read_bytes(2)
            header, mask_payloadlen = struct.unpack("BB", data)
            is_final_frame = header & self.FIN
            reserved_bits = header & self.RSV_MASK
            opcode = header & self.OPCODE_MASK
            opcode_is_control = opcode & 0x8
            if self._decompressor is not None and opcode != 0:
                # Compression flag is present in the first frame's header,
                # but we can't decompress until we have all the frames of
                # the message.
                self._frame_compressed = bool(reserved_bits & self.RSV1)
                reserved_bits &= ~self.RSV1
            if reserved_bits:
                # client is using as-yet-undefined extensions; abort
                self._abort()
                return
            is_masked = bool(mask_payloadlen & 0x80)
            payloadlen = mask_payloadlen & 0x7F
    
            # Parse and validate the length.
            if opcode_is_control and payloadlen >= 126:
                # control frames must have payload < 126
                self._abort()
                return
            if payloadlen < 126:
                self._frame_length = payloadlen
            elif payloadlen == 126:
                data = await self._read_bytes(2)
                payloadlen = struct.unpack("!H", data)[0]
            elif payloadlen == 127:
                data = await self._read_bytes(8)
                payloadlen = struct.unpack("!Q", data)[0]
            new_len = payloadlen
            if self._fragmented_message_buffer is not None:
                new_len += len(self._fragmented_message_buffer)
            if new_len > self.params.max_message_size:
                self.close(1009, "message too big")
                self._abort()
                return
    
            # Read the payload, unmasking if necessary.
            if is_masked:
                self._frame_mask = await self._read_bytes(4)
            data = await self._read_bytes(payloadlen)
            if is_masked:
                assert self._frame_mask is not None
                data = _websocket_mask(self._frame_mask, data)
    
            # Decide what to do with this frame.
            if opcode_is_control:
                # control frames may be interleaved with a series of fragmented
                # data frames, so control frames must not interact with
                # self._fragmented_*
                if not is_final_frame:
                    # control frames must not be fragmented
                    self._abort()
                    return
            elif opcode == 0:  # continuation frame
                if self._fragmented_message_buffer is None:
                    # nothing to continue
                    self._abort()
                    return
                self._fragmented_message_buffer += data
                if is_final_frame:
                    opcode = self._fragmented_message_opcode
                    data = self._fragmented_message_buffer
                    self._fragmented_message_buffer = None
            else:  # start of new data message
                if self._fragmented_message_buffer is not None:
                    # can't start new message until the old one is finished
                    self._abort()
                    return
                if not is_final_frame:
                    self._fragmented_message_opcode = opcode
                    self._fragmented_message_buffer = data
    
            if is_final_frame:
                handled_future = self._handle_message(opcode, data)
                if handled_future is not None:
                    await handled_future
    
        def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]":
            """Execute on_message, returning its Future if it is a coroutine."""
            if self.client_terminated:
                return None
    
            if self._frame_compressed:
                assert self._decompressor is not None
                try:
                    data = self._decompressor.decompress(data)
                except _DecompressTooLargeError:
                    self.close(1009, "message too big after decompression")
                    self._abort()
                    return None
    
            if opcode == 0x1:
                # UTF-8 data
                self._message_bytes_in += len(data)
                try:
                    decoded = data.decode("utf-8")
                except UnicodeDecodeError:
                    self._abort()
                    return None
                return self._run_callback(self.handler.on_message, decoded)
            elif opcode == 0x2:
                # Binary data
                self._message_bytes_in += len(data)
                return self._run_callback(self.handler.on_message, data)
            elif opcode == 0x8:
                # Close
                self.client_terminated = True
                if len(data) >= 2:
                    self.close_code = struct.unpack(">H", data[:2])[0]
                if len(data) > 2:
                    self.close_reason = to_unicode(data[2:])
                # Echo the received close code, if any (RFC 6455 section 5.5.1).
                self.close(self.close_code)
            elif opcode == 0x9:
                # Ping
                try:
                    self._write_frame(True, 0xA, data)
                except StreamClosedError:
                    self._abort()
                self._run_callback(self.handler.on_ping, data)
            elif opcode == 0xA:
                # Pong
                self.last_pong = IOLoop.current().time()
                return self._run_callback(self.handler.on_pong, data)
            else:
                self._abort()
            return None
    
        def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
            """Closes the WebSocket connection."""
            if not self.server_terminated:
                if not self.stream.closed():
                    if code is None and reason is not None:
                        code = 1000  # "normal closure" status code
                    if code is None:
                        close_data = b""
                    else:
                        close_data = struct.pack(">H", code)
                    if reason is not None:
                        close_data += utf8(reason)
                    try:
                        self._write_frame(True, 0x8, close_data)
                    except StreamClosedError:
                        self._abort()
                self.server_terminated = True
            if self.client_terminated:
                if self._waiting is not None:
                    self.stream.io_loop.remove_timeout(self._waiting)
                    self._waiting = None
                self.stream.close()
            elif self._waiting is None:
                # Give the client a few seconds to complete a clean shutdown,
                # otherwise just close the connection.
                self._waiting = self.stream.io_loop.add_timeout(
                    self.stream.io_loop.time() + 5, self._abort
                )
            if self.ping_callback:
                self.ping_callback.stop()
                self.ping_callback = None
    
        def is_closing(self) -> bool:
            """Return ``True`` if this connection is closing.
    
            The connection is considered closing if either side has
            initiated its closing handshake or if the stream has been
            shut down uncleanly.
            """
            return self.stream.closed() or self.client_terminated or self.server_terminated
    
        @property
        def ping_interval(self) -> Optional[float]:
            interval = self.params.ping_interval
            if interval is not None:
                return interval
            return 0
    
        @property
        def ping_timeout(self) -> Optional[float]:
            timeout = self.params.ping_timeout
            if timeout is not None:
                return timeout
            assert self.ping_interval is not None
            return max(3 * self.ping_interval, 30)
    
        def start_pinging(self) -> None:
            """Start sending periodic pings to keep the connection alive"""
            assert self.ping_interval is not None
            if self.ping_interval > 0:
                self.last_ping = self.last_pong = IOLoop.current().time()
                self.ping_callback = PeriodicCallback(
                    self.periodic_ping, self.ping_interval * 1000
                )
                self.ping_callback.start()
    
        def periodic_ping(self) -> None:
            """Send a ping to keep the websocket alive
    
            Called periodically if the websocket_ping_interval is set and non-zero.
            """
            if self.is_closing() and self.ping_callback is not None:
                self.ping_callback.stop()
                return
    
            # Check for timeout on pong. Make sure that we really have
            # sent a recent ping in case the machine with both server and
            # client has been suspended since the last ping.
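            # For example, with ping_interval=10 and ping_timeout=30 the
            # connection is dropped only if the last pong is more than 30s
            # old *and* a ping was sent within the last 20s.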
            now = IOLoop.current().time()
            since_last_pong = now - self.last_pong
            since_last_ping = now - self.last_ping
            assert self.ping_interval is not None
            assert self.ping_timeout is not None
            if (
                since_last_ping < 2 * self.ping_interval
                and since_last_pong > self.ping_timeout
            ):
                self.close()
                return
    
            self.write_ping(b"")
            self.last_ping = now
    
        def set_nodelay(self, x: bool) -> None:
            self.stream.set_nodelay(x)
    
    
    class WebSocketClientConnection(simple_httpclient._HTTPConnection):
        """WebSocket client connection.
    
        This class should not be instantiated directly; use the
        `websocket_connect` function instead.
        """
    
        protocol = None  # type: WebSocketProtocol
    
        def __init__(
            self,
            request: httpclient.HTTPRequest,
            on_message_callback: Optional[Callable[[Union[None, str, bytes]], None]] = None,
            compression_options: Optional[Dict[str, Any]] = None,
            ping_interval: Optional[float] = None,
            ping_timeout: Optional[float] = None,
            max_message_size: int = _default_max_message_size,
            subprotocols: Optional[List[str]] = None,
        ) -> None:
            self.connect_future = Future()  # type: Future[WebSocketClientConnection]
            self.read_queue = Queue(1)  # type: Queue[Union[None, str, bytes]]
            self.key = base64.b64encode(os.urandom(16))
            self._on_message_callback = on_message_callback
            self.close_code = None  # type: Optional[int]
            self.close_reason = None  # type: Optional[str]
            self.params = _WebSocketParams(
                ping_interval=ping_interval,
                ping_timeout=ping_timeout,
                max_message_size=max_message_size,
                compression_options=compression_options,
            )
    
            scheme, sep, rest = request.url.partition(":")
            scheme = {"ws": "http", "wss": "https"}[scheme]
            request.url = scheme + sep + rest
            request.headers.update(
                {
                    "Upgrade": "websocket",
                    "Connection": "Upgrade",
                    "Sec-WebSocket-Key": self.key,
                    "Sec-WebSocket-Version": "13",
                }
            )
            if subprotocols is not None:
                request.headers["Sec-WebSocket-Protocol"] = ",".join(subprotocols)
            if compression_options is not None:
                # Always offer to let the server set our max_wbits (and even though
                # we don't offer it, we will accept a client_no_context_takeover
                # from the server).
                # TODO: set server parameters for deflate extension
                # if requested in self.compression_options.
                request.headers[
                    "Sec-WebSocket-Extensions"
                ] = "permessage-deflate; client_max_window_bits"
    
            # The websocket connection is currently unable to follow redirects.
            request.follow_redirects = False
    
            self.tcp_client = TCPClient()
            super().__init__(
                None,
                request,
                lambda: None,
                self._on_http_response,
                104857600,
                self.tcp_client,
                65536,
                104857600,
            )
    
        def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
            """Closes the websocket connection.
    
            ``code`` and ``reason`` are documented under
            `WebSocketHandler.close`.
    
            .. versionadded:: 3.2
    
            .. versionchanged:: 4.0
    
               Added the ``code`` and ``reason`` arguments.
            """
            if self.protocol is not None:
                self.protocol.close(code, reason)
                self.protocol = None  # type: ignore
    
        def on_connection_close(self) -> None:
            if not self.connect_future.done():
                self.connect_future.set_exception(StreamClosedError())
            self._on_message(None)
            self.tcp_client.close()
            super().on_connection_close()
    
        def on_ws_connection_close(
            self, close_code: Optional[int] = None, close_reason: Optional[str] = None
        ) -> None:
            self.close_code = close_code
            self.close_reason = close_reason
            self.on_connection_close()
    
        def _on_http_response(self, response: httpclient.HTTPResponse) -> None:
            if not self.connect_future.done():
                if response.error:
                    self.connect_future.set_exception(response.error)
                else:
                    self.connect_future.set_exception(
                        WebSocketError("Non-websocket response")
                    )
    
        async def headers_received(
            self,
            start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
            headers: httputil.HTTPHeaders,
        ) -> None:
            assert isinstance(start_line, httputil.ResponseStartLine)
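            # Only 101 Switching Protocols completes the websocket
            # handshake; any other status is handed back to the regular
            # HTTP client machinery and ultimately surfaces as an error
            # via _on_http_response.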
            if start_line.code != 101:
                await super().headers_received(start_line, headers)
                return
    
            if self._timeout is not None:
                self.io_loop.remove_timeout(self._timeout)
                self._timeout = None
    
            self.headers = headers
            self.protocol = self.get_websocket_protocol()
            self.protocol._process_server_headers(self.key, self.headers)
            self.protocol.stream = self.connection.detach()
    
            IOLoop.current().add_callback(self.protocol._receive_frame_loop)
            self.protocol.start_pinging()
    
            # Once we've taken over the connection, clear the final callback
            # we set on the http request.  This deactivates the error handling
            # in simple_httpclient that would otherwise interfere with our
            # ability to see exceptions.
            self.final_callback = None  # type: ignore
    
            future_set_result_unless_cancelled(self.connect_future, self)
    
        def write_message(
            self, message: Union[str, bytes], binary: bool = False
        ) -> "Future[None]":
            """Sends a message to the WebSocket server.
    
            If the stream is closed, raises `WebSocketClosedError`.
            Returns a `.Future` which can be used for flow control.
    
            .. versionchanged:: 5.0
               Exception raised on a closed stream changed from `.StreamClosedError`
               to `WebSocketClosedError`.
            """
            return self.protocol.write_message(message, binary=binary)
    
        def read_message(
            self,
            callback: Optional[Callable[["Future[Union[None, str, bytes]]"], None]] = None,
        ) -> Awaitable[Union[None, str, bytes]]:
            """Reads a message from the WebSocket server.
    
            If ``on_message_callback`` was specified at WebSocket
            initialization, this function will never return messages.

            Returns a future whose result is the message, or ``None``
            if the connection is closed.  If a callback argument
            is given, it will be called with the future when it is
            ready.
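
            A minimal coroutine-style sketch (``url`` stands in for a
            real websocket URL)::

                conn = await websocket_connect(url)
                while True:
                    msg = await conn.read_message()
                    if msg is None:
                        break  # connection closed
                    # ... process msg ...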
            """
    
            awaitable = self.read_queue.get()
            if callback is not None:
                self.io_loop.add_future(asyncio.ensure_future(awaitable), callback)
            return awaitable
    
        def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
            return self._on_message(message)
    
        def _on_message(
            self, message: Union[None, str, bytes]
        ) -> Optional[Awaitable[None]]:
            if self._on_message_callback:
                self._on_message_callback(message)
                return None
            else:
                return self.read_queue.put(message)
    
        def ping(self, data: bytes = b"") -> None:
            """Send ping frame to the remote end.
    
            The data argument allows a small amount of data (up to 125
            bytes) to be sent as a part of the ping message. Note that not
            all websocket implementations expose this data to
            applications.
    
            Consider using the ``ping_interval`` argument to
            `websocket_connect` instead of sending pings manually.
    
            .. versionadded:: 5.1
    
            """
            data = utf8(data)
            if self.protocol is None:
                raise WebSocketClosedError()
            self.protocol.write_ping(data)
    
        def on_pong(self, data: bytes) -> None:
            pass
    
        def on_ping(self, data: bytes) -> None:
            pass
    
        def get_websocket_protocol(self) -> WebSocketProtocol:
            return WebSocketProtocol13(self, mask_outgoing=True, params=self.params)
    
        @property
        def selected_subprotocol(self) -> Optional[str]:
            """The subprotocol selected by the server.
    
            .. versionadded:: 5.1
            """
            return self.protocol.selected_subprotocol
    
        def log_exception(
            self,
            typ: "Optional[Type[BaseException]]",
            value: Optional[BaseException],
            tb: Optional[TracebackType],
        ) -> None:
            assert typ is not None
            assert value is not None
            app_log.error("Uncaught exception %s", value, exc_info=(typ, value, tb))
    
    
    def websocket_connect(
        url: Union[str, httpclient.HTTPRequest],
        callback: Optional[Callable[["Future[WebSocketClientConnection]"], None]] = None,
        connect_timeout: Optional[float] = None,
        on_message_callback: Optional[Callable[[Union[None, str, bytes]], None]] = None,
        compression_options: Optional[Dict[str, Any]] = None,
        ping_interval: Optional[float] = None,
        ping_timeout: Optional[float] = None,
        max_message_size: int = _default_max_message_size,
        subprotocols: Optional[List[str]] = None,
    ) -> "Awaitable[WebSocketClientConnection]":
        """Client-side websocket support.
    
        Takes a url and returns a Future whose result is a
        `WebSocketClientConnection`.
    
        ``compression_options`` is interpreted in the same way as the
        return value of `.WebSocketHandler.get_compression_options`.
    
        The connection supports two styles of operation. In the coroutine
        style, the application typically calls
        `~.WebSocketClientConnection.read_message` in a loop::
    
            conn = yield websocket_connect(url)
            while True:
                msg = yield conn.read_message()
                if msg is None: break
                # Do something with msg
    
        In the callback style, pass an ``on_message_callback`` to
        ``websocket_connect``. In both styles, a message of ``None``
        indicates that the connection has been closed.
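
        A minimal callback-style sketch::

            def on_msg(msg):
                if msg is None:
                    return  # connection closed
                print("received", msg)

            conn = yield websocket_connect(url, on_message_callback=on_msg)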
    
        ``subprotocols`` may be a list of strings specifying proposed
        subprotocols. The selected protocol may be found on the
        ``selected_subprotocol`` attribute of the connection object
        when the connection is complete.
    
        .. versionchanged:: 3.2
           Also accepts ``HTTPRequest`` objects in place of urls.
    
        .. versionchanged:: 4.1
           Added ``compression_options`` and ``on_message_callback``.
    
        .. versionchanged:: 4.5
           Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
           arguments, which have the same meaning as in `WebSocketHandler`.
    
        .. versionchanged:: 5.0
           The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    
        .. versionchanged:: 5.1
           Added the ``subprotocols`` argument.
        """
        if isinstance(url, httpclient.HTTPRequest):
            assert connect_timeout is None
            request = url
            # Copy and convert the headers dict/object (see comments in
            # AsyncHTTPClient.fetch)
            request.headers = httputil.HTTPHeaders(request.headers)
        else:
            request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
        request = cast(
            httpclient.HTTPRequest,
            httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS),
        )
        conn = WebSocketClientConnection(
            request,
            on_message_callback=on_message_callback,
            compression_options=compression_options,
            ping_interval=ping_interval,
            ping_timeout=ping_timeout,
            max_message_size=max_message_size,
            subprotocols=subprotocols,
        )
        if callback is not None:
            IOLoop.current().add_future(conn.connect_future, callback)
        return conn.connect_future
tornado-6.1.0/tornado/wsgi.py
    #
    # Copyright 2009 Facebook
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    
    """WSGI support for the Tornado web framework.
    
    WSGI is the Python standard for web servers, and allows for interoperability
    between Tornado and other Python web frameworks and servers.
    
    This module provides WSGI support via the `WSGIContainer` class, which
    makes it possible to run applications using other WSGI frameworks on
    the Tornado HTTP server. The reverse is not supported; the Tornado
    `.Application` and `.RequestHandler` classes are designed for use with
    the Tornado `.HTTPServer` and cannot be used in a generic WSGI
    container.
    
    """
    
    import sys
    from io import BytesIO
    import tornado
    
    from tornado import escape
    from tornado import httputil
    from tornado.log import access_log
    
    from typing import List, Tuple, Optional, Callable, Any, Dict, Text
    from types import TracebackType
    import typing
    
    if typing.TYPE_CHECKING:
        from typing import Type  # noqa: F401
        from wsgiref.types import WSGIApplication as WSGIAppType  # noqa: F401
    
    
    # PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
    # that are smuggled inside objects of type unicode (via the latin1 encoding).
    # This function is like those in the tornado.escape module, but defined
    # here to minimize the temptation to use it in non-wsgi contexts.
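    # For example, the raw header bytes b"\xc3\xa9" decode to the
    # two-character WSGI string "\xc3\xa9" (each byte mapped to the
    # Latin-1 code point of the same value), not to a single "é".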
    def to_wsgi_str(s: bytes) -> str:
        assert isinstance(s, bytes)
        return s.decode("latin1")
    
    
    class WSGIContainer(object):
        r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    
        .. warning::
    
           WSGI is a *synchronous* interface, while Tornado's concurrency model
           is based on single-threaded asynchronous execution.  This means that
           running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
           than running the same app in a multi-threaded WSGI server like
           ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
           benefits to combining Tornado and WSGI in the same process that
           outweigh the reduced scalability.
    
        Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
        run it. For example::
    
            def simple_app(environ, start_response):
                status = "200 OK"
                response_headers = [("Content-type", "text/plain")]
                start_response(status, response_headers)
                return ["Hello world!\n"]
    
            container = tornado.wsgi.WSGIContainer(simple_app)
            http_server = tornado.httpserver.HTTPServer(container)
            http_server.listen(8888)
            tornado.ioloop.IOLoop.current().start()
    
        This class is intended to let other frameworks (Django, web.py, etc.)
        run on the Tornado HTTP server and I/O loop.
    
        The `tornado.web.FallbackHandler` class is often useful for mixing
        Tornado and WSGI apps in the same server.  See
        https://github.com/bdarnell/django-tornado-demo for a complete example.
        """
    
        def __init__(self, wsgi_application: "WSGIAppType") -> None:
            self.wsgi_application = wsgi_application
    
        def __call__(self, request: httputil.HTTPServerRequest) -> None:
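            # PEP 3333 contract: the WSGI application calls the
            # start_response callable we hand it (capturing status and
            # headers into ``data``) and returns an iterable of body
            # chunks, which we buffer and emit as a single Tornado
            # response below.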
            data = {}  # type: Dict[str, Any]
            response = []  # type: List[bytes]
    
            def start_response(
                status: str,
                headers: List[Tuple[str, str]],
                exc_info: Optional[
                    Tuple[
                        "Optional[Type[BaseException]]",
                        Optional[BaseException],
                        Optional[TracebackType],
                    ]
                ] = None,
            ) -> Callable[[bytes], Any]:
                data["status"] = status
                data["headers"] = headers
                return response.append
    
            app_response = self.wsgi_application(
                WSGIContainer.environ(request), start_response
            )
            try:
                response.extend(app_response)
                body = b"".join(response)
            finally:
                if hasattr(app_response, "close"):
                    app_response.close()  # type: ignore
            if not data:
                raise Exception("WSGI app did not call start_response")
    
            status_code_str, reason = data["status"].split(" ", 1)
            status_code = int(status_code_str)
            headers = data["headers"]  # type: List[Tuple[str, str]]
            header_set = set(k.lower() for (k, v) in headers)
            body = escape.utf8(body)
            if status_code != 304:
                if "content-length" not in header_set:
                    headers.append(("Content-Length", str(len(body))))
                if "content-type" not in header_set:
                    headers.append(("Content-Type", "text/html; charset=UTF-8"))
            if "server" not in header_set:
                headers.append(("Server", "TornadoServer/%s" % tornado.version))
    
            start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
            header_obj = httputil.HTTPHeaders()
            for key, value in headers:
                header_obj.add(key, value)
            assert request.connection is not None
            request.connection.write_headers(start_line, header_obj, chunk=body)
            request.connection.finish()
            self._log(status_code, request)
    
        @staticmethod
        def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]:
            """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
            """
            hostport = request.host.split(":")
            if len(hostport) == 2:
                host = hostport[0]
                port = int(hostport[1])
            else:
                host = request.host
                port = 443 if request.protocol == "https" else 80
            environ = {
                "REQUEST_METHOD": request.method,
                "SCRIPT_NAME": "",
                "PATH_INFO": to_wsgi_str(
                    escape.url_unescape(request.path, encoding=None, plus=False)
                ),
                "QUERY_STRING": request.query,
                "REMOTE_ADDR": request.remote_ip,
                "SERVER_NAME": host,
                "SERVER_PORT": str(port),
                "SERVER_PROTOCOL": request.version,
                "wsgi.version": (1, 0),
                "wsgi.url_scheme": request.protocol,
                "wsgi.input": BytesIO(escape.utf8(request.body)),
                "wsgi.errors": sys.stderr,
                "wsgi.multithread": False,
                "wsgi.multiprocess": True,
                "wsgi.run_once": False,
            }
            if "Content-Type" in request.headers:
                environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
            if "Content-Length" in request.headers:
                environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
            for key, value in request.headers.items():
                environ["HTTP_" + key.replace("-", "_").upper()] = value
            return environ
    
        def _log(self, status_code: int, request: httputil.HTTPServerRequest) -> None:
            if status_code < 400:
                log_method = access_log.info
            elif status_code < 500:
                log_method = access_log.warning
            else:
                log_method = access_log.error
            request_time = 1000.0 * request.request_time()
            assert request.method is not None
            assert request.uri is not None
            summary = request.method + " " + request.uri + " (" + request.remote_ip + ")"
            log_method("%d %s %.2fms", status_code, summary, request_time)
    
    
    HTTPRequest = httputil.HTTPServerRequest
    tornado-6.1.0/tox.ini000066400000000000000000000121521374705040500145050ustar00rootroot00000000000000# Tox (https://tox.readthedocs.io) is a tool for running tests
    # in multiple virtualenvs.  This configuration file will run the tornado
    # test suite on all supported python versions.  To use it, "pip install tox"
    # and then run "tox" from this directory.
    #
    # This configuration requires tox 1.8 or higher.
    #
    # Installation tips:
    # When building pycurl on my macports-based setup, I need to either set the
    # environment variable ARCHFLAGS='-arch x86_64' or use
    # 'port install curl +universal' to get both 32- and 64-bit versions of
    # libcurl.
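    #
    # Example invocations (assuming tox is installed):
    #   tox                    # run every environment in envlist below
    #   tox -e py38-full       # run a single environment
    #   tox -e py38 -- --help  # pass extra args through via {posargs}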
    [tox]
    envlist =
            # Basic configurations: Run the tests for each python version.
            py35-full,py36-full,py37-full,py38-full,py39-full,pypy3-full
    
            # Build and test the docs with sphinx.
            docs
    
            # Run the linters.
            lint
    
    # Allow shell commands in tests
    whitelist_externals = /bin/sh
    
    [testenv]
    basepython =
               py3: python3
               py35: python3.5
               py36: python3.6
               py37: python3.7
               py38: python3.8
               py39: python3.9
               pypy3: pypy3
               # In theory, it doesn't matter which python version is used here.
               # In practice, things like changes to the ast module can alter
               # the outputs of the tools (especially where exactly the
               # linter warning-suppression comments go), so we specify a
               # python version for these builds.
               docs: python3.8
               lint: python3.8
    
    deps =
         full: pycurl
         full: twisted
         full: pycares
         docs: -r{toxinidir}/docs/requirements.txt
         lint: flake8
         lint: black==19.10b0
         lint: mypy==0.740
    
    setenv =
           # Treat the extension as mandatory in testing (but not on pypy)
           {py3,py36,py37,py38,py39}: TORNADO_EXTENSION=1
           # CI workers are often overloaded and can cause our tests to exceed
           # the default timeout of 5s.
           ASYNC_TEST_TIMEOUT=25
           # Treat warnings as errors by default. We have a whitelist of
           # allowed warnings in runtests.py, but we want to be strict
           # about any import-time warnings before that setup code is
           # reached. Note that syntax warnings are only reported in
           # -opt builds because regular builds reuse pycs created
           # during sdist installation (and it doesn't seem to be
           # possible to set environment variables during that phase of
           # tox).
           # ResourceWarnings are too noisy on py35 so don't enable 
           # warnings-as-errors there.
           {py3,py36,py37,py38,py39,pypy3}: PYTHONWARNINGS=error:::tornado
    
    
    # All non-comment lines but the last must end in a backslash.
    # Tox filters line-by-line based on the environment name.
    commands =
             # py3*: -b turns on an extra warning when calling
             # str(bytes), and -bb makes it an error.
             python -bb -m tornado.test {posargs:}
             # Python's optimized mode disables the assert statement, so
             # run the tests in this mode to ensure we haven't fallen into
             # the trap of relying on an assertion's side effects or using
             # them for things that should be runtime errors.
             full: python -O -m tornado.test
             # In python 3, opening files in text mode uses a
             # system-dependent encoding by default.  Run the tests with "C"
             # (ascii) and "utf-8" locales to ensure we don't have hidden
             # dependencies on this setting.
             full: sh -c 'LANG=C python -m tornado.test'
             full: sh -c 'LANG=en_US.utf-8 python -m tornado.test'
             # Note that httpclient_test is always run with both client
             # implementations; this flag controls which client all the
             # other tests use.
             full: python -m tornado.test --httpclient=tornado.curl_httpclient.CurlAsyncHTTPClient
             full: python -m tornado.test --resolver=tornado.platform.caresresolver.CaresResolver
             # Run the tests once from the source directory to detect issues
             # involving relative __file__ paths; see
             # https://github.com/tornadoweb/tornado/issues/1780
             full: sh -c '(cd {toxinidir} && unset TORNADO_EXTENSION && python -m tornado.test)'
    
    
    # python will import relative to the current working directory by default,
    # so cd into the tox working directory to avoid picking up the working
    # copy of the files (especially important for the speedups module).
    changedir = {toxworkdir}
    
    # tox 1.6 passes --pre to pip by default, which currently has problems
    # installing pycurl (https://github.com/pypa/pip/issues/1405).
    # Remove it (it's not a part of {opts}) to only install real releases.
    install_command = pip install {opts} {packages}
    
    [testenv:docs]
    changedir = docs
    # For some reason the extension fails to load in this configuration,
    # but it's not really needed for docs anyway.
    setenv = TORNADO_EXTENSION=0
    commands =
        # Build the docs
        sphinx-build -q -E -n -W -b html . {envtmpdir}/html
        # Run the doctests. No -W for doctests because that disallows tests
        # with empty output.
        sphinx-build -q -E -n -b doctest . {envtmpdir}/doctest
    
    [testenv:lint]
    commands =
             flake8 {posargs:}
             black --check --diff {posargs:tornado demos}
             mypy {posargs:tornado}
    changedir = {toxinidir}