pax_global_header00006660000000000000000000000064132242060100014500gustar00rootroot0000000000000052 comment=8e9e75502ff910629663c4cdd7779d43ea2dd150 tornado-4.5.3/000077500000000000000000000000001322420601000131575ustar00rootroot00000000000000tornado-4.5.3/.coveragerc000066400000000000000000000007311322420601000153010ustar00rootroot00000000000000# Test coverage configuration. # Usage: # pip install coverage # coverage erase # clears previous data if any # coverage run -m tornado.test.runtests # coverage report # prints to stdout # coverage html # creates ./htmlcov/*.html including annotated source [run] branch = true source = tornado omit = tornado/platform/* tornado/test/* */_auto2to3* [report] # Ignore missing source files, i.e. fake template-generated "files" ignore_errors = true tornado-4.5.3/.gitignore000066400000000000000000000002351322420601000151470ustar00rootroot00000000000000*.pyc *.pyo *.so *.class *~ build/ /dist/ MANIFEST /tornado.egg-info/ .tox/ .vagrant /.coverage /htmlcov/ /env/ # Used in demo apps secrets.cfg .mypy_cache/ tornado-4.5.3/.travis.yml000066400000000000000000000121171322420601000152720ustar00rootroot00000000000000# https://travis-ci.org/tornadoweb/tornado language: python python: - 2.7 - pypy - 3.3 - 3.4 - 3.5 - 3.6 - nightly - pypy3.5-5.8.0 install: - if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then travis_retry pip install futures mock monotonic trollius; fi - if [[ $TRAVIS_PYTHON_VERSION == 'pypy' ]]; then travis_retry pip install futures mock; fi # TODO(bdarnell): pycares tests are currently disabled on travis due to ipv6 issues. #- if [[ $TRAVIS_PYTHON_VERSION != 'pypy'* ]]; then travis_retry pip install pycares; fi - if [[ $TRAVIS_PYTHON_VERSION != 'pypy'* ]]; then travis_retry pip install pycurl; fi # Twisted runs on 2.x and 3.3+, but is flaky on pypy. - if [[ $TRAVIS_PYTHON_VERSION != 'pypy'* ]]; then travis_retry pip install Twisted; fi - if [[ $TRAVIS_PYTHON_VERSION == '2.7' || $TRAVIS_PYTHON_VERSION == '3.5' || $TRAVIS_PYTHON_VERSION == '3.6' ]]; then travis_retry pip install sphinx sphinx_rtd_theme; fi # On travis the extension should always be built - if [[ $TRAVIS_PYTHON_VERSION != 'pypy'* ]]; then export TORNADO_EXTENSION=1; fi - travis_retry python setup.py install - travis_retry pip install codecov virtualenv # Create a separate no-dependencies virtualenv to make sure all imports # of optional-dependencies are guarded. (skipped on pypy3 because # virtualenv no longer supports py32) - if [[ $TRAVIS_PYTHON_VERSION != 'pypy3' ]]; then virtualenv ./nodeps; fi - if [[ $TRAVIS_PYTHON_VERSION != 'pypy3' ]]; then ./nodeps/bin/python -VV; fi - if [[ $TRAVIS_PYTHON_VERSION != 'pypy3' ]]; then ./nodeps/bin/python setup.py install; fi - curl-config --version; pip freeze script: # Run the tests once from the source directory to detect issues # involving relative __file__ paths; see # https://github.com/tornadoweb/tornado/issues/1780 - unset TORNADO_EXTENSION && python -m tornado.test # For all other test variants, get out of the source directory before # running tests to ensure that we get the installed speedups module # instead of the source directory which doesn't have it. - cd maint # Copy the coveragerc down so coverage.py can find it. - cp ../.coveragerc . - if [[ $TRAVIS_PYTHON_VERSION != 'pypy'* ]]; then export TORNADO_EXTENSION=1; fi - export TARGET="-m tornado.test.runtests" # Travis workers are often overloaded and cause our tests to exceed # the default timeout of 5s. 
- export ASYNC_TEST_TIMEOUT=15 # We use "python -m coverage" instead of the "bin/coverage" script # so we can pass additional arguments to python. # coverage needs a function that was removed in python 3.6 so we can't # run it with nightly cpython. - if [[ $TRAVIS_PYTHON_VERSION != nightly ]]; then export TARGET="-m coverage run $TARGET"; fi - python $TARGET - python $TARGET --ioloop=tornado.platform.select.SelectIOLoop - python -O $TARGET - LANG=C python $TARGET - LANG=en_US.utf-8 python $TARGET - if [[ $TRAVIS_PYTHON_VERSION == 3* ]]; then python -bb $TARGET; fi - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then python $TARGET --resolver=tornado.netutil.ThreadedResolver; fi - if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then python $TARGET --httpclient=tornado.curl_httpclient.CurlAsyncHTTPClient; fi - if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then python $TARGET --ioloop_time_monotonic; fi - if [[ $TRAVIS_PYTHON_VERSION != 'pypy'* ]]; then python $TARGET --ioloop=tornado.platform.twisted.TwistedIOLoop; fi - if [[ $TRAVIS_PYTHON_VERSION == 3.4 || $TRAVIS_PYTHON_VERSION == 3.5 || $TRAVIS_PYTHON_VERSION == 3.6 ]]; then python $TARGET --ioloop=tornado.platform.asyncio.AsyncIOLoop; fi - if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then python $TARGET --ioloop=tornado.platform.asyncio.AsyncIOLoop; fi - if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then python $TARGET --resolver=tornado.platform.twisted.TwistedResolver; fi #- if [[ $TRAVIS_PYTHON_VERSION != pypy* ]]; then python $TARGET --resolver=tornado.platform.caresresolver.CaresResolver; fi - if [[ $TRAVIS_PYTHON_VERSION == 3* ]]; then python $TARGET --ioloop_time_monotonic; fi - if [[ $TRAVIS_PYTHON_VERSION != 'pypy3' ]]; then ../nodeps/bin/python -m tornado.test.runtests; fi # make coverage reports for Codecov to find - if [[ $TRAVIS_PYTHON_VERSION != nightly ]]; then coverage xml; fi - export TORNADO_EXTENSION=0 - if [[ $TRAVIS_PYTHON_VERSION == '3.5' || $TRAVIS_PYTHON_VERSION == 3.6 ]]; then cd ../docs && mkdir sphinx-out && sphinx-build -E -n -W -b html . sphinx-out; fi - if [[ $TRAVIS_PYTHON_VERSION == '2.7' || $TRAVIS_PYTHON_VERSION == '3.5' || $TRAVIS_PYTHON_VERSION == 3.6 ]]; then cd ../docs && mkdir sphinx-doctest-out && sphinx-build -E -n -b doctest . sphinx-out; fi after_success: # call codecov from project root - if [[ $TRAVIS_PYTHON_VERSION != nightly ]]; then cd ../ && codecov; fi # This reportedly works around an issue downloading packages from pypi on # travis. Consider removing this after the underlying issue is fixed. # https://github.com/travis-ci/travis-ci/issues/2389 sudo: false matrix: fast_finish: true tornado-4.5.3/LICENSE000066400000000000000000000261361322420601000141740ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
tornado-4.5.3/MANIFEST.in000066400000000000000000000014461322420601000147220ustar00rootroot00000000000000recursive-include demos *.py *.yaml *.html *.css *.js *.xml *.sql README recursive-include docs * prune docs/build include tornado/speedups.c include tornado/test/README include tornado/test/csv_translations/fr_FR.csv include tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo include tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po include tornado/test/options_test.cfg include tornado/test/static/robots.txt include tornado/test/static/sample.xml include tornado/test/static/sample.xml.gz include tornado/test/static/sample.xml.bz2 include tornado/test/static/dir/index.html include tornado/test/static_foo.txt include tornado/test/templates/utf8.html include tornado/test/test.crt include tornado/test/test.key include LICENSE include README.rst include runtests.sh tornado-4.5.3/README.rst000066400000000000000000000030411322420601000146440ustar00rootroot00000000000000Tornado Web Server ================== .. image:: https://badges.gitter.im/Join%20Chat.svg :alt: Join the chat at https://gitter.im/tornadoweb/tornado :target: https://gitter.im/tornadoweb/tornado?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge `Tornado `_ is a Python web framework and asynchronous networking library, originally developed at `FriendFeed `_. By using non-blocking network I/O, Tornado can scale to tens of thousands of open connections, making it ideal for `long polling `_, `WebSockets `_, and other applications that require a long-lived connection to each user. Hello, world ------------ Here is a simple "Hello, world" example web app for Tornado: .. code-block:: python import tornado.ioloop import tornado.web class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") def make_app(): return tornado.web.Application([ (r"/", MainHandler), ]) if __name__ == "__main__": app = make_app() app.listen(8888) tornado.ioloop.IOLoop.current().start() This example does not use any of Tornado's asynchronous features; for that see this `simple chat room `_. Documentation ------------- Documentation and links to additional resources are available at http://www.tornadoweb.org tornado-4.5.3/appveyor.yml000066400000000000000000000034701322420601000155530ustar00rootroot00000000000000# Appveyor is Windows CI: https://ci.appveyor.com/project/bdarnell/tornado environment: global: TORNADO_EXTENSION: "1" # We only build with 3.5+ because it works out of the box, while other # versions require lots of machinery. matrix: - PYTHON: "C:\\Python35" PYTHON_VERSION: "3.5.x" PYTHON_ARCH: "32" - PYTHON: "C:\\Python35-x64" PYTHON_VERSION: "3.5.x" PYTHON_ARCH: "64" - PYTHON: "C:\\Python36" PYTHON_VERSION: "3.6.x" PYTHON_ARCH: "32" - PYTHON: "C:\\Python36-x64" PYTHON_VERSION: "3.6.x" PYTHON_ARCH: "64" install: # Make sure the right python version is first on the PATH. - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" # Check that we have the expected version and architecture for Python - "python --version" - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" # Upgrade to the latest version of pip to avoid it displaying warnings # about it being out of date. - "pip install --disable-pip-version-check --user --upgrade pip" - "pip install tox wheel" build: false # Not a C# project, build stuff at the test step instead. test_script: # Build the compiled extension and run the project tests. 
# This is a bit of a hack that doesn't scale with new python versions, # but for now it lets us avoid duplication with .travis.yml and tox.ini. # Running "py3x-full" would be nice but it's failing on installing # dependencies with no useful logs. - "tox -e py35,py36 --skip-missing-interpreters" after_test: # If tests are successful, create binary packages for the project. - "python setup.py bdist_wheel" - ps: "ls dist" artifacts: # Archive the generated packages in the ci.appveyor.com build report. - path: dist\* #on_success: # - TODO: upload the content of dist/*.whl to a public wheelhouse # tornado-4.5.3/codecov.yml000066400000000000000000000000451322420601000153230ustar00rootroot00000000000000comment: off coverage: status: off tornado-4.5.3/demos/000077500000000000000000000000001322420601000142665ustar00rootroot00000000000000tornado-4.5.3/demos/appengine/000077500000000000000000000000001322420601000162345ustar00rootroot00000000000000tornado-4.5.3/demos/appengine/README000066400000000000000000000031031322420601000171110ustar00rootroot00000000000000Running the Tornado AppEngine example ===================================== This example is designed to run in Google AppEngine, so there are a couple of steps to get it running. You can download the Google AppEngine Python development environment at http://code.google.com/appengine/downloads.html. 1. Link or copy the tornado code directory into this directory: ln -s ../../tornado tornado AppEngine doesn't use the Python modules installed on this machine. You need to have the 'tornado' module copied or linked for AppEngine to find it. 3. Install and run dev_appserver If you don't already have the App Engine SDK, download it from http://code.google.com/appengine/downloads.html To start the tornado demo, run the dev server on this directory: dev_appserver.py . 4. Visit http://localhost:8080/ in your browser If you sign in as an administrator, you will be able to create and edit blog posts. If you sign in as anybody else, you will only see the existing blog posts. If you want to deploy the blog in production: 1. Register a new appengine application and put its id in app.yaml First register a new application at http://appengine.google.com/. Then edit app.yaml in this directory and change the "application" setting from "tornado-appenginge" to your new application id. 2. Deploy to App Engine If you registered an application id, you can now upload your new Tornado blog by running this command: appcfg update . After that, visit application_id.appspot.com, where application_id is the application you registered. tornado-4.5.3/demos/appengine/app.yaml000066400000000000000000000002621322420601000177000ustar00rootroot00000000000000application: tornado-appengine version: 2 runtime: python27 api_version: 1 threadsafe: yes handlers: - url: /static/ static_dir: static - url: /.* script: blog.application tornado-4.5.3/demos/appengine/blog.py000066400000000000000000000124111322420601000175300ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import functools import os.path import re import tornado.escape import tornado.web import tornado.wsgi import unicodedata from google.appengine.api import users from google.appengine.ext import db class Entry(db.Model): """A single blog entry.""" author = db.UserProperty() title = db.StringProperty(required=True) slug = db.StringProperty(required=True) body_source = db.TextProperty(required=True) html = db.TextProperty(required=True) published = db.DateTimeProperty(auto_now_add=True) updated = db.DateTimeProperty(auto_now=True) def administrator(method): """Decorate with this method to restrict to site admins.""" @functools.wraps(method) def wrapper(self, *args, **kwargs): if not self.current_user: if self.request.method == "GET": self.redirect(self.get_login_url()) return raise tornado.web.HTTPError(403) elif not self.current_user.administrator: if self.request.method == "GET": self.redirect("/") return raise tornado.web.HTTPError(403) else: return method(self, *args, **kwargs) return wrapper class BaseHandler(tornado.web.RequestHandler): """Implements Google Accounts authentication methods.""" def get_current_user(self): user = users.get_current_user() if user: user.administrator = users.is_current_user_admin() return user def get_login_url(self): return users.create_login_url(self.request.uri) def get_template_namespace(self): # Let the templates access the users module to generate login URLs ns = super(BaseHandler, self).get_template_namespace() ns['users'] = users return ns class HomeHandler(BaseHandler): def get(self): entries = db.Query(Entry).order('-published').fetch(limit=5) if not entries: if not self.current_user or self.current_user.administrator: self.redirect("/compose") return self.render("home.html", entries=entries) class EntryHandler(BaseHandler): def get(self, slug): entry = db.Query(Entry).filter("slug =", slug).get() if not entry: raise tornado.web.HTTPError(404) self.render("entry.html", entry=entry) class ArchiveHandler(BaseHandler): def get(self): entries = db.Query(Entry).order('-published') self.render("archive.html", entries=entries) class FeedHandler(BaseHandler): def get(self): entries = db.Query(Entry).order('-published').fetch(limit=10) self.set_header("Content-Type", "application/atom+xml") self.render("feed.xml", entries=entries) class ComposeHandler(BaseHandler): @administrator def get(self): key = self.get_argument("key", None) entry = Entry.get(key) if key else None self.render("compose.html", entry=entry) @administrator def post(self): key = self.get_argument("key", None) if key: entry = Entry.get(key) entry.title = self.get_argument("title") entry.body_source = self.get_argument("body_source") entry.html = tornado.escape.linkify( self.get_argument("body_source")) else: title = self.get_argument("title") slug = unicodedata.normalize("NFKD", title).encode( "ascii", "ignore") slug = re.sub(r"[^\w]+", " ", slug) slug = "-".join(slug.lower().strip().split()) if not slug: slug = "entry" while True: existing = db.Query(Entry).filter("slug =", slug).get() if not existing or str(existing.key()) == key: break slug += "-2" entry = Entry( author=self.current_user, title=title, slug=slug, body_source=self.get_argument("body_source"), html=tornado.escape.linkify(self.get_argument("body_source")), ) entry.put() self.redirect("/entry/" + entry.slug) class EntryModule(tornado.web.UIModule): def render(self, entry): return self.render_string("modules/entry.html", 
entry=entry) settings = { "blog_title": u"Tornado Blog", "template_path": os.path.join(os.path.dirname(__file__), "templates"), "ui_modules": {"Entry": EntryModule}, "xsrf_cookies": True, } application = tornado.web.Application([ (r"/", HomeHandler), (r"/archive", ArchiveHandler), (r"/feed", FeedHandler), (r"/entry/([^/]+)", EntryHandler), (r"/compose", ComposeHandler), ], **settings) application = tornado.wsgi.WSGIAdapter(application) tornado-4.5.3/demos/appengine/static/000077500000000000000000000000001322420601000175235ustar00rootroot00000000000000tornado-4.5.3/demos/appengine/static/blog.css000066400000000000000000000041301322420601000211560ustar00rootroot00000000000000/* * Copyright 2009 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ body { background: white; color: black; margin: 15px; margin-top: 0; } body, input, textarea { font-family: Georgia, serif; font-size: 12pt; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } h1, h2, h3, h4 { font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; margin: 0; } h1 { font-size: 20pt; } pre, code { font-family: monospace; color: #060; } pre { margin-left: 1em; padding-left: 1em; border-left: 1px solid silver; line-height: 14pt; } a, a code { color: #00c; } #body { max-width: 800px; margin: auto; } #header { background-color: #3b5998; padding: 5px; padding-left: 10px; padding-right: 10px; margin-bottom: 1em; } #header, #header a { color: white; } #header h1 a { text-decoration: none; } #footer, #content { margin-left: 10px; margin-right: 10px; } #footer { margin-top: 3em; } .entry h1 a { color: black; text-decoration: none; } .entry { margin-bottom: 2em; } .entry .date { margin-top: 3px; } .entry p { margin: 0; margin-bottom: 1em; } .entry .body { margin-top: 1em; line-height: 16pt; } .compose td { vertical-align: middle; padding-bottom: 5px; } .compose td.field { padding-right: 10px; } .compose .title, .compose .submit { font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; font-weight: bold; } .compose .title { font-size: 20pt; } .compose .title, .compose .body_source { width: 100%; } .compose .body_source { height: 500px; line-height: 16pt; } tornado-4.5.3/demos/appengine/templates/000077500000000000000000000000001322420601000202325ustar00rootroot00000000000000tornado-4.5.3/demos/appengine/templates/archive.html000066400000000000000000000012411322420601000225370ustar00rootroot00000000000000{% extends "base.html" %} {% block head %} {% end %} {% block body %}
  <ul class="archive">
    {% for entry in entries %}
      <li>
        <div class="date">{{ locale.format_date(entry.published, full_format=True, shorter=True) }}</div>
        <a href="/entry/{{ entry.slug }}">{{ entry.title }}</a>
      </li>
    {% end %}
  </ul>
{% end %} tornado-4.5.3/demos/appengine/templates/base.html000066400000000000000000000020451322420601000220330ustar00rootroot00000000000000 {{ handler.settings["blog_title"] }} {% block head %}{% end %}
{% block body %}{% end %}
{% block bottom %}{% end %} tornado-4.5.3/demos/appengine/templates/compose.html000066400000000000000000000026341322420601000225720ustar00rootroot00000000000000{% extends "base.html" %} {% block body %}
{% if entry %} {% end %} {% module xsrf_form_html() %}
{% end %} {% block bottom %} {% end %} tornado-4.5.3/demos/appengine/templates/entry.html000066400000000000000000000001221322420601000222540ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% module Entry(entry) %} {% end %} tornado-4.5.3/demos/appengine/templates/feed.xml000066400000000000000000000025011322420601000216550ustar00rootroot00000000000000 {% set date_format = "%Y-%m-%dT%H:%M:%SZ" %} {{ handler.settings["blog_title"] }} {% if len(entries) > 0 %} {{ max(e.updated for e in entries).strftime(date_format) }} {% else %} {{ datetime.datetime.utcnow().strftime(date_format) }} {% end %} http://{{ request.host }}/ {{ handler.settings["blog_title"] }} {% for entry in entries %} http://{{ request.host }}/entry/{{ entry.slug }} {{ entry.title }} {{ entry.updated.strftime(date_format) }} {{ entry.published.strftime(date_format) }}
{% raw entry.html %}
{% end %}
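The Atom timestamps in feed.xml come from strftime with the date_format set at the top of the template. A quick self-contained check of what that format produces:

    import datetime

    date_format = "%Y-%m-%dT%H:%M:%SZ"
    print(datetime.datetime(2017, 12, 25, 8, 30, 0).strftime(date_format))
    # -> 2017-12-25T08:30:00Z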
tornado-4.5.3/demos/appengine/templates/home.html000066400000000000000000000002641322420601000220520ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% for entry in entries %} {% module Entry(entry) %} {% end %} {% end %} tornado-4.5.3/demos/appengine/templates/modules/000077500000000000000000000000001322420601000217025ustar00rootroot00000000000000tornado-4.5.3/demos/appengine/templates/modules/entry.html000066400000000000000000000006351322420601000237350ustar00rootroot00000000000000

<div class="entry">
  <h1><a href="/entry/{{ entry.slug }}">{{ entry.title }}</a></h1>
  <div class="date">{{ locale.format_date(entry.published, full_format=True, shorter=True) }}</div>
  <div class="body">{% raw entry.html %}</div>
  {% if current_user and current_user.administrator %}
    <div class="admin"><a href="/compose?key={{ entry.key() }}">Edit this post</a></div>
  {% end %}
</div>
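That closes out the App Engine demo. One piece of it worth reading in isolation is the slug generation in ComposeHandler.post; here is a minimal, self-contained sketch of the same logic (the slugify name is mine, the demo inlines this code):

    import re
    import unicodedata

    def slugify(title):
        # Fold accents to ASCII, replace non-word runs, then hyphenate.
        slug = unicodedata.normalize("NFKD", title).encode("ascii", "ignore")
        slug = re.sub(br"[^\w]+", b" ", slug)
        slug = b"-".join(slug.lower().strip().split())
        return slug.decode("ascii") or "entry"

    print(slugify(u"Hello, World!"))  # -> hello-world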
tornado-4.5.3/demos/benchmark/000077500000000000000000000000001322420601000162205ustar00rootroot00000000000000tornado-4.5.3/demos/benchmark/benchmark.py000077500000000000000000000045721322420601000205370ustar00rootroot00000000000000#!/usr/bin/env python # # A simple benchmark of tornado's HTTP stack. # Requires 'ab' to be installed. # # Running without profiling: # demos/benchmark/benchmark.py # demos/benchmark/benchmark.py --quiet --num_runs=5|grep "Requests per second" # # Running with profiling: # # python -m cProfile -o /tmp/prof demos/benchmark/benchmark.py # python -m pstats /tmp/prof # % sort time # % stats 20 from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.web import RequestHandler, Application import random import signal import subprocess try: xrange except NameError: xrange = range # choose a random port to avoid colliding with TIME_WAIT sockets left over # from previous runs. define("min_port", type=int, default=8000) define("max_port", type=int, default=9000) # Increasing --n without --keepalive will eventually run into problems # due to TIME_WAIT sockets define("n", type=int, default=15000) define("c", type=int, default=25) define("keepalive", type=bool, default=False) define("quiet", type=bool, default=False) # Repeat the entire benchmark this many times (on different ports) # This gives JITs time to warm up, etc. Pypy needs 3-5 runs at # --n=15000 for its JIT to reach full effectiveness define("num_runs", type=int, default=1) define("ioloop", type=str, default=None) class RootHandler(RequestHandler): def get(self): self.write("Hello, world") def _log(self): pass def handle_sigchld(sig, frame): IOLoop.current().add_callback_from_signal(IOLoop.current().stop) def main(): parse_command_line() if options.ioloop: IOLoop.configure(options.ioloop) for i in xrange(options.num_runs): run() def run(): io_loop = IOLoop(make_current=True) app = Application([("/", RootHandler)]) port = random.randrange(options.min_port, options.max_port) app.listen(port, address='127.0.0.1') signal.signal(signal.SIGCHLD, handle_sigchld) args = ["ab"] args.extend(["-n", str(options.n)]) args.extend(["-c", str(options.c)]) if options.keepalive: args.append("-k") if options.quiet: # just stops the progress messages printed to stderr args.append("-q") args.append("http://127.0.0.1:%d/" % port) subprocess.Popen(args) io_loop.start() io_loop.close() io_loop.clear_current() if __name__ == '__main__': main() tornado-4.5.3/demos/benchmark/chunk_benchmark.py000077500000000000000000000030401322420601000217140ustar00rootroot00000000000000#!/usr/bin/env python # # Downloads a large file in chunked encoding with both curl and simple clients import logging from tornado.curl_httpclient import CurlAsyncHTTPClient from tornado.simple_httpclient import SimpleAsyncHTTPClient from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.web import RequestHandler, Application define('port', default=8888) define('num_chunks', default=1000) define('chunk_size', default=2048) class ChunkHandler(RequestHandler): def get(self): for i in xrange(options.num_chunks): self.write('A' * options.chunk_size) self.flush() self.finish() def main(): parse_command_line() app = Application([('/', ChunkHandler)]) app.listen(options.port, address='127.0.0.1') def callback(response): response.rethrow() assert len(response.body) == (options.num_chunks * options.chunk_size) logging.warning("fetch completed in %s seconds", 
response.request_time) IOLoop.current().stop() logging.warning("Starting fetch with curl client") curl_client = CurlAsyncHTTPClient() curl_client.fetch('http://localhost:%d/' % options.port, callback=callback) IOLoop.current().start() logging.warning("Starting fetch with simple client") simple_client = SimpleAsyncHTTPClient() simple_client.fetch('http://localhost:%d/' % options.port, callback=callback) IOLoop.current().start() if __name__ == '__main__': main() tornado-4.5.3/demos/benchmark/gen_benchmark.py000066400000000000000000000022451322420601000213600ustar00rootroot00000000000000#!/usr/bin/env python # # A simple benchmark of the tornado.gen module. # Runs in two modes, testing new-style (@coroutine and Futures) # and old-style (@engine and Tasks) coroutines. from timeit import Timer from tornado import gen from tornado.options import options, define, parse_command_line define('num', default=10000, help='number of iterations') # These benchmarks are delicate. They hit various fast-paths in the gen # machinery in order to stay synchronous so we don't need an IOLoop. # This removes noise from the results, but it's easy to change things # in a way that completely invalidates the results. @gen.engine def e2(callback): callback() @gen.engine def e1(): for i in range(10): yield gen.Task(e2) @gen.coroutine def c2(): pass @gen.coroutine def c1(): for i in range(10): yield c2() def main(): parse_command_line() t = Timer(e1) results = t.timeit(options.num) / options.num print('engine: %0.3f ms per iteration' % (results * 1000)) t = Timer(c1) results = t.timeit(options.num) / options.num print('coroutine: %0.3f ms per iteration' % (results * 1000)) if __name__ == '__main__': main() tornado-4.5.3/demos/benchmark/stack_context_benchmark.py000077500000000000000000000043311322420601000234610ustar00rootroot00000000000000#!/usr/bin/env python """Benchmark for stack_context functionality.""" import collections import contextlib import functools import subprocess import sys from tornado import stack_context class Benchmark(object): def enter_exit(self, count): """Measures the overhead of the nested "with" statements when using many contexts. """ if count < 0: return with self.make_context(): self.enter_exit(count - 1) def call_wrapped(self, count): """Wraps and calls a function at each level of stack depth to measure the overhead of the wrapped function. """ # This queue is analogous to IOLoop.add_callback, but lets us # benchmark the stack_context in isolation without system call # overhead. 
queue = collections.deque() self.call_wrapped_inner(queue, count) while queue: queue.popleft()() def call_wrapped_inner(self, queue, count): if count < 0: return with self.make_context(): queue.append(stack_context.wrap( functools.partial(self.call_wrapped_inner, queue, count - 1))) class StackBenchmark(Benchmark): def make_context(self): return stack_context.StackContext(self.__context) @contextlib.contextmanager def __context(self): yield class ExceptionBenchmark(Benchmark): def make_context(self): return stack_context.ExceptionStackContext(self.__handle_exception) def __handle_exception(self, typ, value, tb): pass def main(): base_cmd = [ sys.executable, '-m', 'timeit', '-s', 'from stack_context_benchmark import StackBenchmark, ExceptionBenchmark'] cmds = [ 'StackBenchmark().enter_exit(50)', 'StackBenchmark().call_wrapped(50)', 'StackBenchmark().enter_exit(500)', 'StackBenchmark().call_wrapped(500)', 'ExceptionBenchmark().enter_exit(50)', 'ExceptionBenchmark().call_wrapped(50)', 'ExceptionBenchmark().enter_exit(500)', 'ExceptionBenchmark().call_wrapped(500)', ] for cmd in cmds: print(cmd) subprocess.check_call(base_cmd + [cmd]) if __name__ == '__main__': main() tornado-4.5.3/demos/benchmark/template_benchmark.py000077500000000000000000000030331322420601000224210ustar00rootroot00000000000000#!/usr/bin/env python # # A simple benchmark of tornado template rendering, based on # https://github.com/mitsuhiko/jinja2/blob/master/examples/bench.py import sys from timeit import Timer from tornado.options import options, define, parse_command_line from tornado.template import Template define('num', default=100, help='number of iterations') define('dump', default=False, help='print template generated code and exit') context = { 'page_title': 'mitsuhiko\'s benchmark', 'table': [dict(a=1,b=2,c=3,d=4,e=5,f=6,g=7,h=8,i=9,j=10) for x in range(1000)] } tmpl = Template("""\ {{ page_title }}

  <body>
    <h1>{{ page_title }}</h1>
    <table>
    {% for row in table %}
      <tr>
      {% for cell in row %}
        <td>{{ cell }}</td>
      {% end %}
      </tr>
    {% end %}
    </table>
  </body>
</html>
\ """) def render(): tmpl.generate(**context) def main(): parse_command_line() if options.dump: print(tmpl.code) sys.exit(0) t = Timer(render) results = t.timeit(options.num) / options.num print('%0.3f ms per iteration' % (results*1000)) if __name__ == '__main__': main() tornado-4.5.3/demos/blog/000077500000000000000000000000001322420601000152115ustar00rootroot00000000000000tornado-4.5.3/demos/blog/Dockerfile000066400000000000000000000005621322420601000172060ustar00rootroot00000000000000FROM python:2.7 EXPOSE 8888 RUN apt-get update && apt-get install -y mysql-client # based on python:2.7-onbuild, but if we use that image directly # the above apt-get line runs too late. RUN mkdir -p /usr/src/app WORKDIR /usr/src/app COPY requirements.txt /usr/src/app/ RUN pip install -r requirements.txt COPY . /usr/src/app CMD python blog.py --mysql_host=mysql tornado-4.5.3/demos/blog/README000066400000000000000000000046621322420601000161010ustar00rootroot00000000000000Running the Tornado Blog example app ==================================== This demo is a simple blogging engine that uses MySQL to store posts and Google Accounts for author authentication. Since it depends on MySQL, you need to set up MySQL and the database schema for the demo to run. If you have `docker` and `docker-compose` installed, the demo and all its prerequisites can be installed with `docker-compose up`. 1. Install prerequisites and build tornado See http://www.tornadoweb.org/ for installation instructions. If you can run the "helloworld" example application, your environment is set up correctly. 2. Install MySQL if needed Consult the documentation for your platform. Under Ubuntu Linux you can run "apt-get install mysql". Under OS X you can download the MySQL PKG file from http://dev.mysql.com/downloads/mysql/ 3. Install Python prerequisites Install the packages MySQL-python, torndb, and markdown (e.g. using pip or easy_install). Note that these packages currently only work on Python 2. Tornado supports Python 3, but this blog demo does not. 3. Connect to MySQL and create a database and user for the blog. Connect to MySQL as a user that can create databases and users: mysql -u root Create a database named "blog": mysql> CREATE DATABASE blog; Allow the "blog" user to connect with the password "blog": mysql> GRANT ALL PRIVILEGES ON blog.* TO 'blog'@'localhost' IDENTIFIED BY 'blog'; 4. Create the tables in your new database. You can use the provided schema.sql file by running this command: mysql --user=blog --password=blog --database=blog < schema.sql You can run the above command again later if you want to delete the contents of the blog and start over after testing. 5. Run the blog example With the default user, password, and database you can just run: ./blog.py If you've changed anything, you can alter the default MySQL settings with arguments on the command line, e.g.: ./blog.py --mysql_user=casey --mysql_password=happiness --mysql_database=foodblog 6. Visit your new blog Open http://localhost:8888/ in your web browser. You will be redirected to a Google account sign-in page because the blog uses Google accounts for authentication. Currently the first user to connect will automatically be given the ability to create and edit posts. Once you've created one blog post, subsequent users will not be prompted to sign in. 
tornado-4.5.3/demos/blog/blog.py000077500000000000000000000204771322420601000165230ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import bcrypt import concurrent.futures import MySQLdb import markdown import os.path import re import subprocess import torndb import tornado.escape from tornado import gen import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web import unicodedata from tornado.options import define, options define("port", default=8888, help="run on the given port", type=int) define("mysql_host", default="127.0.0.1:3306", help="blog database host") define("mysql_database", default="blog", help="blog database name") define("mysql_user", default="blog", help="blog database user") define("mysql_password", default="blog", help="blog database password") # A thread pool to be used for password hashing with bcrypt. executor = concurrent.futures.ThreadPoolExecutor(2) class Application(tornado.web.Application): def __init__(self): handlers = [ (r"/", HomeHandler), (r"/archive", ArchiveHandler), (r"/feed", FeedHandler), (r"/entry/([^/]+)", EntryHandler), (r"/compose", ComposeHandler), (r"/auth/create", AuthCreateHandler), (r"/auth/login", AuthLoginHandler), (r"/auth/logout", AuthLogoutHandler), ] settings = dict( blog_title=u"Tornado Blog", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), ui_modules={"Entry": EntryModule}, xsrf_cookies=True, cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", login_url="/auth/login", debug=True, ) super(Application, self).__init__(handlers, **settings) # Have one global connection to the blog DB across all handlers self.db = torndb.Connection( host=options.mysql_host, database=options.mysql_database, user=options.mysql_user, password=options.mysql_password) self.maybe_create_tables() def maybe_create_tables(self): try: self.db.get("SELECT COUNT(*) from entries;") except MySQLdb.ProgrammingError: subprocess.check_call(['mysql', '--host=' + options.mysql_host, '--database=' + options.mysql_database, '--user=' + options.mysql_user, '--password=' + options.mysql_password], stdin=open('schema.sql')) class BaseHandler(tornado.web.RequestHandler): @property def db(self): return self.application.db def get_current_user(self): user_id = self.get_secure_cookie("blogdemo_user") if not user_id: return None return self.db.get("SELECT * FROM authors WHERE id = %s", int(user_id)) def any_author_exists(self): return bool(self.db.get("SELECT * FROM authors LIMIT 1")) class HomeHandler(BaseHandler): def get(self): entries = self.db.query("SELECT * FROM entries ORDER BY published " "DESC LIMIT 5") if not entries: self.redirect("/compose") return self.render("home.html", entries=entries) class EntryHandler(BaseHandler): def get(self, slug): entry = self.db.get("SELECT * FROM entries WHERE slug = %s", slug) if not entry: raise tornado.web.HTTPError(404) self.render("entry.html", 
entry=entry) class ArchiveHandler(BaseHandler): def get(self): entries = self.db.query("SELECT * FROM entries ORDER BY published " "DESC") self.render("archive.html", entries=entries) class FeedHandler(BaseHandler): def get(self): entries = self.db.query("SELECT * FROM entries ORDER BY published " "DESC LIMIT 10") self.set_header("Content-Type", "application/atom+xml") self.render("feed.xml", entries=entries) class ComposeHandler(BaseHandler): @tornado.web.authenticated def get(self): id = self.get_argument("id", None) entry = None if id: entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id)) self.render("compose.html", entry=entry) @tornado.web.authenticated def post(self): id = self.get_argument("id", None) title = self.get_argument("title") text = self.get_argument("markdown") html = markdown.markdown(text) if id: entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id)) if not entry: raise tornado.web.HTTPError(404) slug = entry.slug self.db.execute( "UPDATE entries SET title = %s, markdown = %s, html = %s " "WHERE id = %s", title, text, html, int(id)) else: slug = unicodedata.normalize("NFKD", title).encode( "ascii", "ignore") slug = re.sub(r"[^\w]+", " ", slug) slug = "-".join(slug.lower().strip().split()) if not slug: slug = "entry" while True: e = self.db.get("SELECT * FROM entries WHERE slug = %s", slug) if not e: break slug += "-2" self.db.execute( "INSERT INTO entries (author_id,title,slug,markdown,html," "published) VALUES (%s,%s,%s,%s,%s,UTC_TIMESTAMP())", self.current_user.id, title, slug, text, html) self.redirect("/entry/" + slug) class AuthCreateHandler(BaseHandler): def get(self): self.render("create_author.html") @gen.coroutine def post(self): if self.any_author_exists(): raise tornado.web.HTTPError(400, "author already created") hashed_password = yield executor.submit( bcrypt.hashpw, tornado.escape.utf8(self.get_argument("password")), bcrypt.gensalt()) author_id = self.db.execute( "INSERT INTO authors (email, name, hashed_password) " "VALUES (%s, %s, %s)", self.get_argument("email"), self.get_argument("name"), hashed_password) self.set_secure_cookie("blogdemo_user", str(author_id)) self.redirect(self.get_argument("next", "/")) class AuthLoginHandler(BaseHandler): def get(self): # If there are no authors, redirect to the account creation page. 
if not self.any_author_exists(): self.redirect("/auth/create") else: self.render("login.html", error=None) @gen.coroutine def post(self): author = self.db.get("SELECT * FROM authors WHERE email = %s", self.get_argument("email")) if not author: self.render("login.html", error="email not found") return hashed_password = yield executor.submit( bcrypt.hashpw, tornado.escape.utf8(self.get_argument("password")), tornado.escape.utf8(author.hashed_password)) if hashed_password == author.hashed_password: self.set_secure_cookie("blogdemo_user", str(author.id)) self.redirect(self.get_argument("next", "/")) else: self.render("login.html", error="incorrect password") class AuthLogoutHandler(BaseHandler): def get(self): self.clear_cookie("blogdemo_user") self.redirect(self.get_argument("next", "/")) class EntryModule(tornado.web.UIModule): def render(self, entry): return self.render_string("modules/entry.html", entry=entry) def main(): tornado.options.parse_command_line() http_server = tornado.httpserver.HTTPServer(Application()) http_server.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-4.5.3/demos/blog/docker-compose.yml000066400000000000000000000003721322420601000206500ustar00rootroot00000000000000mysql: image: mysql:5.6 environment: MYSQL_ROOT_PASSWORD: its_a_secret_to_everybody MYSQL_USER: blog MYSQL_PASSWORD: blog MYSQL_DATABASE: blog ports: - "3306" blog: build: . links: - mysql ports: - "8888:8888" tornado-4.5.3/demos/blog/requirements.txt000066400000000000000000000000641322420601000204750ustar00rootroot00000000000000bcrypt futures MySQL-python markdown tornado torndb tornado-4.5.3/demos/blog/schema.sql000066400000000000000000000027641322420601000172030ustar00rootroot00000000000000-- Copyright 2009 FriendFeed -- -- Licensed under the Apache License, Version 2.0 (the "License"); you may -- not use this file except in compliance with the License. You may obtain -- a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -- License for the specific language governing permissions and limitations -- under the License. 
-- To create the database: -- CREATE DATABASE blog; -- GRANT ALL PRIVILEGES ON blog.* TO 'blog'@'localhost' IDENTIFIED BY 'blog'; -- -- To reload the tables: -- mysql --user=blog --password=blog --database=blog < schema.sql SET SESSION storage_engine = "InnoDB"; SET SESSION time_zone = "+0:00"; ALTER DATABASE CHARACTER SET "utf8"; DROP TABLE IF EXISTS entries; CREATE TABLE entries ( id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, author_id INT NOT NULL REFERENCES authors(id), slug VARCHAR(100) NOT NULL UNIQUE, title VARCHAR(512) NOT NULL, markdown MEDIUMTEXT NOT NULL, html MEDIUMTEXT NOT NULL, published DATETIME NOT NULL, updated TIMESTAMP NOT NULL, KEY (published) ); DROP TABLE IF EXISTS authors; CREATE TABLE authors ( id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, email VARCHAR(100) NOT NULL UNIQUE, name VARCHAR(100) NOT NULL, hashed_password VARCHAR(100) NOT NULL ); tornado-4.5.3/demos/blog/static/000077500000000000000000000000001322420601000165005ustar00rootroot00000000000000tornado-4.5.3/demos/blog/static/blog.css000066400000000000000000000041221322420601000201340ustar00rootroot00000000000000/* * Copyright 2009 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ body { background: white; color: black; margin: 15px; margin-top: 0; } body, input, textarea { font-family: Georgia, serif; font-size: 12pt; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } h1, h2, h3, h4 { font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; margin: 0; } h1 { font-size: 20pt; } pre, code { font-family: monospace; color: #060; } pre { margin-left: 1em; padding-left: 1em; border-left: 1px solid silver; line-height: 14pt; } a, a code { color: #00c; } #body { max-width: 800px; margin: auto; } #header { background-color: #3b5998; padding: 5px; padding-left: 10px; padding-right: 10px; margin-bottom: 1em; } #header, #header a { color: white; } #header h1 a { text-decoration: none; } #footer, #content { margin-left: 10px; margin-right: 10px; } #footer { margin-top: 3em; } .entry h1 a { color: black; text-decoration: none; } .entry { margin-bottom: 2em; } .entry .date { margin-top: 3px; } .entry p { margin: 0; margin-bottom: 1em; } .entry .body { margin-top: 1em; line-height: 16pt; } .compose td { vertical-align: middle; padding-bottom: 5px; } .compose td.field { padding-right: 10px; } .compose .title, .compose .submit { font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; font-weight: bold; } .compose .title { font-size: 20pt; } .compose .title, .compose .markdown { width: 100%; } .compose .markdown { height: 500px; line-height: 16pt; } tornado-4.5.3/demos/blog/templates/000077500000000000000000000000001322420601000172075ustar00rootroot00000000000000tornado-4.5.3/demos/blog/templates/archive.html000066400000000000000000000012411322420601000215140ustar00rootroot00000000000000{% extends "base.html" %} {% block head %} {% end %} {% block body %}
  <ul class="archive">
    {% for entry in entries %}
      <li>
        <div class="date">{{ locale.format_date(entry.published, full_format=True, shorter=True) }}</div>
        <a href="/entry/{{ entry.slug }}">{{ entry.title }}</a>
      </li>
    {% end %}
  </ul>
{% end %} tornado-4.5.3/demos/blog/templates/base.html000066400000000000000000000020041322420601000210030ustar00rootroot00000000000000 {{ escape(handler.settings["blog_title"]) }} {% block head %}{% end %}
{% block body %}{% end %}
{% block bottom %}{% end %} tornado-4.5.3/demos/blog/templates/compose.html000066400000000000000000000030151322420601000215410ustar00rootroot00000000000000{% extends "base.html" %} {% block body %}
{% if entry %} {% end %} {% module xsrf_form_html() %}
{% end %} {% block bottom %} {% end %} tornado-4.5.3/demos/blog/templates/create_author.html000066400000000000000000000004631322420601000227250ustar00rootroot00000000000000{% extends "base.html" %} {% block body %}
<form action="/auth/create" method="post">
  Email: <input name="email" type="text"><br>
  Name: <input name="name" type="text"><br>
  Password: <input name="password" type="password"><br>
  <input type="submit" value="Sign up">
  {% module xsrf_form_html() %}
</form>
{% end %} tornado-4.5.3/demos/blog/templates/entry.html000066400000000000000000000001221322420601000212310ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% module Entry(entry) %} {% end %} tornado-4.5.3/demos/blog/templates/feed.xml000066400000000000000000000025011322420601000206320ustar00rootroot00000000000000 {% set date_format = "%Y-%m-%dT%H:%M:%SZ" %} {{ handler.settings["blog_title"] }} {% if len(entries) > 0 %} {{ max(e.updated for e in entries).strftime(date_format) }} {% else %} {{ datetime.datetime.utcnow().strftime(date_format) }} {% end %} http://{{ request.host }}/ {{ handler.settings["blog_title"] }} {% for entry in entries %} http://{{ request.host }}/entry/{{ entry.slug }} {{ entry.title }} {{ entry.updated.strftime(date_format) }} {{ entry.published.strftime(date_format) }}
{% raw entry.html %}
{% end %}
tornado-4.5.3/demos/blog/templates/home.html000066400000000000000000000002641322420601000210270ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% for entry in entries %} {% module Entry(entry) %} {% end %} {% end %} tornado-4.5.3/demos/blog/templates/login.html000066400000000000000000000005261322420601000212100ustar00rootroot00000000000000{% extends "base.html" %} {% block body %} {% if error %} Error: {{ error }}

{% end %}

Email:
Password:
{% module xsrf_form_html() %}
{% end %} tornado-4.5.3/demos/blog/templates/modules/000077500000000000000000000000001322420601000206575ustar00rootroot00000000000000tornado-4.5.3/demos/blog/templates/modules/entry.html000066400000000000000000000005651322420601000227140ustar00rootroot00000000000000

{{ entry.title }}

{{ locale.format_date(entry.published, full_format=True, shorter=True) }}
{% raw entry.html %}
{% if current_user %} {% end %}
tornado-4.5.3/demos/chat/000077500000000000000000000000001322420601000152055ustar00rootroot00000000000000tornado-4.5.3/demos/chat/chatdemo.py000077500000000000000000000105121322420601000173450ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import tornado.escape import tornado.ioloop import tornado.web import os.path import uuid from tornado.concurrent import Future from tornado import gen from tornado.options import define, options, parse_command_line define("port", default=8888, help="run on the given port", type=int) define("debug", default=False, help="run in debug mode") class MessageBuffer(object): def __init__(self): self.waiters = set() self.cache = [] self.cache_size = 200 def wait_for_messages(self, cursor=None): # Construct a Future to return to our caller. This allows # wait_for_messages to be yielded from a coroutine even though # it is not a coroutine itself. We will set the result of the # Future when results are available. result_future = Future() if cursor: new_count = 0 for msg in reversed(self.cache): if msg["id"] == cursor: break new_count += 1 if new_count: result_future.set_result(self.cache[-new_count:]) return result_future self.waiters.add(result_future) return result_future def cancel_wait(self, future): self.waiters.remove(future) # Set an empty result to unblock any coroutines waiting. future.set_result([]) def new_messages(self, messages): logging.info("Sending new message to %r listeners", len(self.waiters)) for future in self.waiters: future.set_result(messages) self.waiters = set() self.cache.extend(messages) if len(self.cache) > self.cache_size: self.cache = self.cache[-self.cache_size:] # Making this a non-singleton is left as an exercise for the reader. global_message_buffer = MessageBuffer() class MainHandler(tornado.web.RequestHandler): def get(self): self.render("index.html", messages=global_message_buffer.cache) class MessageNewHandler(tornado.web.RequestHandler): def post(self): message = { "id": str(uuid.uuid4()), "body": self.get_argument("body"), } # to_basestring is necessary for Python 3's json encoder, # which doesn't accept byte strings. 
message["html"] = tornado.escape.to_basestring( self.render_string("message.html", message=message)) if self.get_argument("next", None): self.redirect(self.get_argument("next")) else: self.write(message) global_message_buffer.new_messages([message]) class MessageUpdatesHandler(tornado.web.RequestHandler): @gen.coroutine def post(self): cursor = self.get_argument("cursor", None) # Save the future returned by wait_for_messages so we can cancel # it in wait_for_messages self.future = global_message_buffer.wait_for_messages(cursor=cursor) messages = yield self.future if self.request.connection.stream.closed(): return self.write(dict(messages=messages)) def on_connection_close(self): global_message_buffer.cancel_wait(self.future) def main(): parse_command_line() app = tornado.web.Application( [ (r"/", MainHandler), (r"/a/message/new", MessageNewHandler), (r"/a/message/updates", MessageUpdatesHandler), ], cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), xsrf_cookies=True, debug=options.debug, ) app.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-4.5.3/demos/chat/static/000077500000000000000000000000001322420601000164745ustar00rootroot00000000000000tornado-4.5.3/demos/chat/static/chat.css000066400000000000000000000017351322420601000201330ustar00rootroot00000000000000/* * Copyright 2009 FriendFeed * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ body { background: white; margin: 10px; } body, input { font-family: sans-serif; font-size: 10pt; color: black; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } #body { position: absolute; bottom: 10px; left: 10px; } #input { margin-top: 0.5em; } #inbox .message { padding-top: 0.25em; } #nav { float: right; z-index: 99; } tornado-4.5.3/demos/chat/static/chat.js000066400000000000000000000077531322420601000177650ustar00rootroot00000000000000// Copyright 2009 FriendFeed // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. 
$(document).ready(function() { if (!window.console) window.console = {}; if (!window.console.log) window.console.log = function() {}; $("#messageform").on("submit", function() { newMessage($(this)); return false; }); $("#messageform").on("keypress", function(e) { if (e.keyCode == 13) { newMessage($(this)); return false; } return true; }); $("#message").select(); updater.poll(); }); function newMessage(form) { var message = form.formToDict(); var disabled = form.find("input[type=submit]"); disabled.disable(); $.postJSON("/a/message/new", message, function(response) { updater.showMessage(response); if (message.id) { form.parent().remove(); } else { form.find("input[type=text]").val("").select(); disabled.enable(); } }); } function getCookie(name) { var r = document.cookie.match("\\b" + name + "=([^;]*)\\b"); return r ? r[1] : undefined; } jQuery.postJSON = function(url, args, callback) { args._xsrf = getCookie("_xsrf"); $.ajax({url: url, data: $.param(args), dataType: "text", type: "POST", success: function(response) { if (callback) callback(eval("(" + response + ")")); }, error: function(response) { console.log("ERROR:", response); }}); }; jQuery.fn.formToDict = function() { var fields = this.serializeArray(); var json = {}; for (var i = 0; i < fields.length; i++) { json[fields[i].name] = fields[i].value; } if (json.next) delete json.next; return json; }; jQuery.fn.disable = function() { this.enable(false); return this; }; jQuery.fn.enable = function(opt_enable) { if (arguments.length && !opt_enable) { this.attr("disabled", "disabled"); } else { this.removeAttr("disabled"); } return this; }; var updater = { errorSleepTime: 500, cursor: null, poll: function() { var args = {"_xsrf": getCookie("_xsrf")}; if (updater.cursor) args.cursor = updater.cursor; $.ajax({url: "/a/message/updates", type: "POST", dataType: "text", data: $.param(args), success: updater.onSuccess, error: updater.onError}); }, onSuccess: function(response) { try { updater.newMessages(eval("(" + response + ")")); } catch (e) { updater.onError(); return; } updater.errorSleepTime = 500; window.setTimeout(updater.poll, 0); }, onError: function(response) { updater.errorSleepTime *= 2; console.log("Poll error; sleeping for", updater.errorSleepTime, "ms"); window.setTimeout(updater.poll, updater.errorSleepTime); }, newMessages: function(response) { if (!response.messages) return; updater.cursor = response.cursor; var messages = response.messages; updater.cursor = messages[messages.length - 1].id; console.log(messages.length, "new messages, cursor:", updater.cursor); for (var i = 0; i < messages.length; i++) { updater.showMessage(messages[i]); } }, showMessage: function(message) { var existing = $("#m" + message.id); if (existing.length > 0) return; var node = $(message.html); node.hide(); $("#inbox").append(node); node.slideDown(); } }; tornado-4.5.3/demos/chat/templates/000077500000000000000000000000001322420601000172035ustar00rootroot00000000000000tornado-4.5.3/demos/chat/templates/index.html000066400000000000000000000021471322420601000212040ustar00rootroot00000000000000 Tornado Chat Demo
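<!-- Messages already cached on the server are rendered into the inbox at
     page load; chat.js appends any messages that arrive afterwards. -->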
{% for message in messages %} {% module Template("message.html", message=message) %} {% end %}
{% module xsrf_form_html() %}
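<!-- xsrf_form_html() emits the hidden _xsrf input matching the _xsrf
     cookie that chat.js sends with its AJAX posts (the application is
     created with xsrf_cookies=True). -->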
tornado-4.5.3/demos/chat/templates/message.html000066400000000000000000000001331322420601000215120ustar00rootroot00000000000000
{% module linkify(message["body"]) %}
tornado-4.5.3/demos/facebook/000077500000000000000000000000001322420601000160375ustar00rootroot00000000000000tornado-4.5.3/demos/facebook/README000066400000000000000000000006171322420601000167230ustar00rootroot00000000000000Running the Tornado Facebook example ==================================== To run this example, you must register a Facebook application with a Connect URL set to the domain the this demo will be running on (i.e. http://localhost:8888/ by default). The API key and secret for this application must be passed on the command line: python facebook.py --facebook_api_key=ABC --facebook_secret=XYZ tornado-4.5.3/demos/facebook/facebook.py000077500000000000000000000104741322420601000201730ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import tornado.auth import tornado.escape import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web from tornado.options import define, options define("port", default=8888, help="run on the given port", type=int) define("facebook_api_key", help="your Facebook application API key", type=str) define("facebook_secret", help="your Facebook application secret", type=str) class Application(tornado.web.Application): def __init__(self): handlers = [ (r"/", MainHandler), (r"/auth/login", AuthLoginHandler), (r"/auth/logout", AuthLogoutHandler), ] settings = dict( cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", login_url="/auth/login", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), xsrf_cookies=True, facebook_api_key=options.facebook_api_key, facebook_secret=options.facebook_secret, ui_modules={"Post": PostModule}, debug=True, autoescape=None, ) tornado.web.Application.__init__(self, handlers, **settings) class BaseHandler(tornado.web.RequestHandler): def get_current_user(self): user_json = self.get_secure_cookie("fbdemo_user") if not user_json: return None return tornado.escape.json_decode(user_json) class MainHandler(BaseHandler, tornado.auth.FacebookGraphMixin): @tornado.web.authenticated @tornado.web.asynchronous def get(self): self.facebook_request("/me/home", self._on_stream, access_token=self.current_user["access_token"]) def _on_stream(self, stream): if stream is None: # Session may have expired self.redirect("/auth/login") return self.render("stream.html", stream=stream) class AuthLoginHandler(BaseHandler, tornado.auth.FacebookGraphMixin): @tornado.web.asynchronous def get(self): my_url = (self.request.protocol + "://" + self.request.host + "/auth/login?next=" + tornado.escape.url_escape(self.get_argument("next", "/"))) if self.get_argument("code", False): self.get_authenticated_user( redirect_uri=my_url, client_id=self.settings["facebook_api_key"], client_secret=self.settings["facebook_secret"], code=self.get_argument("code"), callback=self._on_auth) return self.authorize_redirect(redirect_uri=my_url, client_id=self.settings["facebook_api_key"], 
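                                # The "user_posts" scope is passed through
                                # extra_params, which authorize_redirect
                                # appends to the OAuth authorize URL as
                                # query parameters.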
extra_params={"scope": "user_posts"}) def _on_auth(self, user): if not user: raise tornado.web.HTTPError(500, "Facebook auth failed") self.set_secure_cookie("fbdemo_user", tornado.escape.json_encode(user)) self.redirect(self.get_argument("next", "/")) class AuthLogoutHandler(BaseHandler, tornado.auth.FacebookGraphMixin): def get(self): self.clear_cookie("fbdemo_user") self.redirect(self.get_argument("next", "/")) class PostModule(tornado.web.UIModule): def render(self, post): return self.render_string("modules/post.html", post=post) def main(): tornado.options.parse_command_line() if not (options.facebook_api_key and options.facebook_secret): print("--facebook_api_key and --facebook_secret must be set") return http_server = tornado.httpserver.HTTPServer(Application()) http_server.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-4.5.3/demos/facebook/static/000077500000000000000000000000001322420601000173265ustar00rootroot00000000000000tornado-4.5.3/demos/facebook/static/facebook.css000066400000000000000000000027241322420601000216160ustar00rootroot00000000000000/* * Copyright 2009 Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ body { background: white; color: black; margin: 15px; } body, input, textarea { font-family: "Lucida Grande", Tahoma, Verdana, sans-serif; font-size: 10pt; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } img { border: 0; } a { text-decoration: none; color: #3b5998; } a:hover { text-decoration: underline; } .post { border-bottom: 1px solid #eeeeee; min-height: 50px; padding-bottom: 10px; margin-top: 10px; } .post .picture { float: left; } .post .picture img { height: 50px; width: 50px; } .post .body { margin-left: 60px; } .post .media img { border: 1px solid #cccccc; padding: 3px; } .post .media:hover img { border: 1px solid #3b5998; } .post a.actor { font-weight: bold; } .post .meta { font-size: 11px; } .post a.permalink { color: #777777; } #body { max-width: 700px; margin: auto; } tornado-4.5.3/demos/facebook/static/facebook.js000066400000000000000000000000001322420601000214230ustar00rootroot00000000000000tornado-4.5.3/demos/facebook/templates/000077500000000000000000000000001322420601000200355ustar00rootroot00000000000000tornado-4.5.3/demos/facebook/templates/modules/000077500000000000000000000000001322420601000215055ustar00rootroot00000000000000tornado-4.5.3/demos/facebook/templates/modules/post.html000066400000000000000000000013761322420601000233670ustar00rootroot00000000000000
{% set author_url="http://www.facebook.com/profile.php?id=" + escape(post["from"]["id"]) %}
{{ escape(post["from"]["name"]) }} {% if "message" in post %} {{ escape(post["message"]) }} {% end %}
tornado-4.5.3/demos/facebook/templates/stream.html000066400000000000000000000011631322420601000222170ustar00rootroot00000000000000 Tornado Facebook Stream Demo
{{ escape(current_user["name"]) }} - {{ _("Sign out") }}
{% for post in stream["data"] %} {{ modules.Post(post) }} {% end %}
tornado-4.5.3/demos/file_upload/000077500000000000000000000000001322420601000165515ustar00rootroot00000000000000tornado-4.5.3/demos/file_upload/file_receiver.py000066400000000000000000000032011322420601000217220ustar00rootroot00000000000000#!/usr/bin/env python """Usage: python file_receiver.py Demonstrates a server that receives a multipart-form-encoded set of files in an HTTP POST, or streams in the raw data of a single file in an HTTP PUT. See file_uploader.py in this directory for code that uploads files in this format. """ import logging try: from urllib.parse import unquote except ImportError: # Python 2. from urllib import unquote import tornado.ioloop import tornado.web from tornado import options class POSTHandler(tornado.web.RequestHandler): def post(self): for field_name, files in self.request.files.items(): for info in files: filename, content_type = info['filename'], info['content_type'] body = info['body'] logging.info('POST "%s" "%s" %d bytes', filename, content_type, len(body)) self.write('OK') @tornado.web.stream_request_body class PUTHandler(tornado.web.RequestHandler): def initialize(self): self.bytes_read = 0 def data_received(self, chunk): self.bytes_read += len(chunk) def put(self, filename): filename = unquote(filename) mtype = self.request.headers.get('Content-Type') logging.info('PUT "%s" "%s" %d bytes', filename, mtype, self.bytes_read) self.write('OK') def make_app(): return tornado.web.Application([ (r"/post", POSTHandler), (r"/(.*)", PUTHandler), ]) if __name__ == "__main__": # Tornado configures logging. options.parse_command_line() app = make_app() app.listen(8888) tornado.ioloop.IOLoop.current().start() tornado-4.5.3/demos/file_upload/file_uploader.py000066400000000000000000000070271322420601000217430ustar00rootroot00000000000000#!/usr/bin/env python """Usage: python file_uploader.py [--put] file1.txt file2.png ... Demonstrates uploading files to a server, without concurrency. It can either POST a multipart-form-encoded request containing one or more files, or PUT a single file without encoding. See also file_receiver.py in this directory, a server that receives uploads. """ import mimetypes import os import sys from functools import partial from uuid import uuid4 try: from urllib.parse import quote except ImportError: # Python 2. from urllib import quote from tornado import gen, httpclient, ioloop from tornado.options import define, options # Using HTTP POST, upload one or more files in a single multipart-form-encoded # request. @gen.coroutine def multipart_producer(boundary, filenames, write): boundary_bytes = boundary.encode() for filename in filenames: filename_bytes = filename.encode() write(b'--%s\r\n' % (boundary_bytes,)) write(b'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (filename_bytes, filename_bytes)) mtype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' write(b'Content-Type: %s\r\n' % (mtype.encode(),)) write(b'\r\n') with open(filename, 'rb') as f: while True: # 16k at a time. chunk = f.read(16 * 1024) if not chunk: break write(chunk) # Let the IOLoop process its event queue. yield gen.moment write(b'\r\n') yield gen.moment write(b'--%s--\r\n' % (boundary_bytes,)) # Using HTTP PUT, upload one raw file. This is preferred for large files since # the server can stream the data instead of buffering it entirely in memory. 
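# post() handles the multipart POST path: pointing body_producer at
# multipart_producer lets the client generate the request body
# incrementally instead of buffering the entire upload in memory.
# (The raw HTTP PUT path described above is implemented by raw_producer
# and put() further down.)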
@gen.coroutine def post(filenames): client = httpclient.AsyncHTTPClient() boundary = uuid4().hex headers = {'Content-Type': 'multipart/form-data; boundary=%s' % boundary} producer = partial(multipart_producer, boundary, filenames) response = yield client.fetch('http://localhost:8888/post', method='POST', headers=headers, body_producer=producer) print(response) @gen.coroutine def raw_producer(filename, write): with open(filename, 'rb') as f: while True: # 16K at a time. chunk = f.read(16 * 1024) if not chunk: # Complete. break write(chunk) @gen.coroutine def put(filenames): client = httpclient.AsyncHTTPClient() for filename in filenames: mtype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' headers = {'Content-Type': mtype} producer = partial(raw_producer, filename) url_path = quote(os.path.basename(filename)) response = yield client.fetch('http://localhost:8888/%s' % url_path, method='PUT', headers=headers, body_producer=producer) print(response) define("put", type=bool, help="Use PUT instead of POST", group="file uploader") # Tornado configures logging from command line opts and returns remaining args. filenames = options.parse_command_line() if not filenames: print("Provide a list of filenames to upload.", file=sys.stderr) sys.exit(1) method = put if options.put else post ioloop.IOLoop.current().run_sync(lambda: method(filenames)) tornado-4.5.3/demos/helloworld/000077500000000000000000000000001322420601000164415ustar00rootroot00000000000000tornado-4.5.3/demos/helloworld/helloworld.py000077500000000000000000000023061322420601000211720ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web from tornado.options import define, options define("port", default=8888, help="run on the given port", type=int) class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") def main(): tornado.options.parse_command_line() application = tornado.web.Application([ (r"/", MainHandler), ]) http_server = tornado.httpserver.HTTPServer(application) http_server.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-4.5.3/demos/s3server/000077500000000000000000000000001322420601000160425ustar00rootroot00000000000000tornado-4.5.3/demos/s3server/s3server.py000066400000000000000000000226621322420601000202000ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Implementation of an S3-like storage server based on local files. Useful to test features that will eventually run on S3, or if you want to run something locally that was once running on S3. We don't support all the features of S3, but it does work with the standard S3 client for the most basic semantics. To use the standard S3 client with this module: c = S3.AWSAuthConnection("", "", server="localhost", port=8888, is_secure=False) c.create_bucket("mybucket") c.put("mybucket", "mykey", "a value") print c.get("mybucket", "mykey").body """ import bisect import datetime import hashlib import os import os.path import urllib from tornado import escape from tornado import httpserver from tornado import ioloop from tornado import web def start(port, root_directory="/tmp/s3", bucket_depth=0): """Starts the mock S3 server on the given port at the given path.""" application = S3Application(root_directory, bucket_depth) http_server = httpserver.HTTPServer(application) http_server.listen(port) ioloop.IOLoop.current().start() class S3Application(web.Application): """Implementation of an S3-like storage server based on local files. If bucket depth is given, we break files up into multiple directories to prevent hitting file system limits for number of files in each directories. 1 means one level of directories, 2 means 2, etc. """ def __init__(self, root_directory, bucket_depth=0): web.Application.__init__(self, [ (r"/", RootHandler), (r"/([^/]+)/(.+)", ObjectHandler), (r"/([^/]+)/", BucketHandler), ]) self.directory = os.path.abspath(root_directory) if not os.path.exists(self.directory): os.makedirs(self.directory) self.bucket_depth = bucket_depth class BaseRequestHandler(web.RequestHandler): SUPPORTED_METHODS = ("PUT", "GET", "DELETE") def render_xml(self, value): assert isinstance(value, dict) and len(value) == 1 self.set_header("Content-Type", "application/xml; charset=UTF-8") name = value.keys()[0] parts = [] parts.append('<' + escape.utf8(name) + ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') self._render_parts(value.values()[0], parts) parts.append('') self.finish('\n' + ''.join(parts)) def _render_parts(self, value, parts=[]): if isinstance(value, (unicode, bytes)): parts.append(escape.xhtml_escape(value)) elif isinstance(value, int) or isinstance(value, long): parts.append(str(value)) elif isinstance(value, datetime.datetime): parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) elif isinstance(value, dict): for name, subvalue in value.iteritems(): if not isinstance(subvalue, list): subvalue = [subvalue] for subsubvalue in subvalue: parts.append('<' + escape.utf8(name) + '>') self._render_parts(subsubvalue, parts) parts.append('') else: raise Exception("Unknown S3 value type %r", value) def _object_path(self, bucket, object_name): if self.application.bucket_depth < 1: return os.path.abspath(os.path.join( self.application.directory, bucket, object_name)) hash = hashlib.md5(object_name).hexdigest() path = os.path.abspath(os.path.join( self.application.directory, bucket)) for i in range(self.application.bucket_depth): path = os.path.join(path, hash[:2 * (i + 1)]) return os.path.join(path, object_name) class RootHandler(BaseRequestHandler): def get(self): names = os.listdir(self.application.directory) buckets = [] for name in names: path = os.path.join(self.application.directory, name) info = os.stat(path) buckets.append({ "Name": name, "CreationDate": datetime.datetime.utcfromtimestamp( 
info.st_ctime), }) self.render_xml({"ListAllMyBucketsResult": { "Buckets": {"Bucket": buckets}, }}) class BucketHandler(BaseRequestHandler): def get(self, bucket_name): prefix = self.get_argument("prefix", u"") marker = self.get_argument("marker", u"") max_keys = int(self.get_argument("max-keys", 50000)) path = os.path.abspath(os.path.join(self.application.directory, bucket_name)) terse = int(self.get_argument("terse", 0)) if not path.startswith(self.application.directory) or \ not os.path.isdir(path): raise web.HTTPError(404) object_names = [] for root, dirs, files in os.walk(path): for file_name in files: object_names.append(os.path.join(root, file_name)) skip = len(path) + 1 for i in range(self.application.bucket_depth): skip += 2 * (i + 1) + 1 object_names = [n[skip:] for n in object_names] object_names.sort() contents = [] start_pos = 0 if marker: start_pos = bisect.bisect_right(object_names, marker, start_pos) if prefix: start_pos = bisect.bisect_left(object_names, prefix, start_pos) truncated = False for object_name in object_names[start_pos:]: if not object_name.startswith(prefix): break if len(contents) >= max_keys: truncated = True break object_path = self._object_path(bucket_name, object_name) c = {"Key": object_name} if not terse: info = os.stat(object_path) c.update({ "LastModified": datetime.datetime.utcfromtimestamp( info.st_mtime), "Size": info.st_size, }) contents.append(c) marker = object_name self.render_xml({"ListBucketResult": { "Name": bucket_name, "Prefix": prefix, "Marker": marker, "MaxKeys": max_keys, "IsTruncated": truncated, "Contents": contents, }}) def put(self, bucket_name): path = os.path.abspath(os.path.join( self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or \ os.path.exists(path): raise web.HTTPError(403) os.makedirs(path) self.finish() def delete(self, bucket_name): path = os.path.abspath(os.path.join( self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or \ not os.path.isdir(path): raise web.HTTPError(404) if len(os.listdir(path)) > 0: raise web.HTTPError(403) os.rmdir(path) self.set_status(204) self.finish() class ObjectHandler(BaseRequestHandler): def get(self, bucket, object_name): object_name = urllib.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or \ not os.path.isfile(path): raise web.HTTPError(404) info = os.stat(path) self.set_header("Content-Type", "application/unknown") self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp( info.st_mtime)) object_file = open(path, "rb") try: self.finish(object_file.read()) finally: object_file.close() def put(self, bucket, object_name): object_name = urllib.unquote(object_name) bucket_dir = os.path.abspath(os.path.join( self.application.directory, bucket)) if not bucket_dir.startswith(self.application.directory) or \ not os.path.isdir(bucket_dir): raise web.HTTPError(404) path = self._object_path(bucket, object_name) if not path.startswith(bucket_dir) or os.path.isdir(path): raise web.HTTPError(403) directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) object_file = open(path, "w") object_file.write(self.request.body) object_file.close() self.finish() def delete(self, bucket, object_name): object_name = urllib.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or \ not os.path.isfile(path): raise web.HTTPError(404) os.unlink(path) 
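        # Reply with 204 No Content, the conventional success status for a
        # DELETE with no response body.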
self.set_status(204) self.finish() tornado-4.5.3/demos/tcpecho/000077500000000000000000000000001322420601000157135ustar00rootroot00000000000000tornado-4.5.3/demos/tcpecho/README.md000066400000000000000000000011341322420601000171710ustar00rootroot00000000000000TCP echo demo ============= This demo shows how to use Tornado's asynchronous TCP client and server by implementing `handle_stream` as a coroutine. To run the server: ``` $ python server.py ``` The client will send the message given with the `--message` option (which defaults to "ping"), wait for a response, then quit. To run: ``` $ python client.py --message="your message here" ``` Alternatively, you can interactively send messages to the echo server with a telnet client. For example: ``` $ telnet localhost 9888 Trying ::1... Connected to localhost. Escape character is '^]'. ping ping ``` tornado-4.5.3/demos/tcpecho/client.py000066400000000000000000000014071322420601000175450ustar00rootroot00000000000000from __future__ import print_function from tornado.ioloop import IOLoop from tornado import gen from tornado.tcpclient import TCPClient from tornado.options import options, define define("host", default="localhost", help="TCP server host") define("port", default=9888, help="TCP port to connect to") define("message", default="ping", help="Message to send") @gen.coroutine def send_message(): stream = yield TCPClient().connect(options.host, options.port) yield stream.write((options.message + "\n").encode()) print("Sent to server:", options.message) reply = yield stream.read_until(b"\n") print("Response from server:", reply.decode().strip()) if __name__ == "__main__": options.parse_command_line() IOLoop.current().run_sync(send_message) tornado-4.5.3/demos/tcpecho/server.py000066400000000000000000000020521322420601000175720ustar00rootroot00000000000000import logging from tornado.ioloop import IOLoop from tornado import gen from tornado.iostream import StreamClosedError from tornado.tcpserver import TCPServer from tornado.options import options, define define("port", default=9888, help="TCP port to listen on") logger = logging.getLogger(__name__) class EchoServer(TCPServer): @gen.coroutine def handle_stream(self, stream, address): while True: try: data = yield stream.read_until(b"\n") logger.info("Received bytes: %s", data) if not data.endswith(b"\n"): data = data + b"\n" yield stream.write(data) except StreamClosedError: logger.warning("Lost client at host %s", address[0]) break except Exception as e: print(e) if __name__ == "__main__": options.parse_command_line() server = EchoServer() server.listen(options.port) logger.info("Listening on TCP port %d", options.port) IOLoop.current().start() tornado-4.5.3/demos/twitter/000077500000000000000000000000001322420601000157705ustar00rootroot00000000000000tornado-4.5.3/demos/twitter/home.html000066400000000000000000000003601322420601000176050ustar00rootroot00000000000000 Tornado Twitter Demo
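<!-- "timeline" is the decoded JSON list that MainHandler fetched from the
     /statuses/home_timeline API before rendering this template. -->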
    {% for tweet in timeline %}
      <li>{{ tweet['user']['screen_name'] }}: {{ tweet['text'] }}</li>
      {% end %}
tornado-4.5.3/demos/twitter/twitterdemo.py000066400000000000000000000066441322420601000207230ustar00rootroot00000000000000#!/usr/bin/env python """A simplistic Twitter viewer to demonstrate the use of TwitterMixin. To run this app, you must first register an application with Twitter: 1) Go to https://dev.twitter.com/apps and create an application. Your application must have a callback URL registered with Twitter. It doesn't matter what it is, but it has to be there (Twitter won't let you use localhost in a registered callback URL, but that won't stop you from running this demo on localhost). 2) Create a file called "secrets.cfg" and put your consumer key and secret (which Twitter gives you when you register an app) in it: twitter_consumer_key = 'asdf1234' twitter_consumer_secret = 'qwer5678' (you could also generate a random value for "cookie_secret" and put it in the same file, although it's not necessary to run this demo) 3) Run this program and go to http://localhost:8888 (by default) in your browser. """ import logging from tornado.auth import TwitterMixin from tornado.escape import json_decode, json_encode from tornado.ioloop import IOLoop from tornado import gen from tornado.options import define, options, parse_command_line, parse_config_file from tornado.web import Application, RequestHandler, authenticated define('port', default=8888, help="port to listen on") define('config_file', default='secrets.cfg', help='filename for additional configuration') define('debug', default=False, group='application', help="run in debug mode (with automatic reloading)") # The following settings should probably be defined in secrets.cfg define('twitter_consumer_key', type=str, group='application') define('twitter_consumer_secret', type=str, group='application') define('cookie_secret', type=str, group='application', default='__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE__', help="signing key for secure cookies") class BaseHandler(RequestHandler): COOKIE_NAME = 'twitterdemo_user' def get_current_user(self): user_json = self.get_secure_cookie(self.COOKIE_NAME) if not user_json: return None return json_decode(user_json) class MainHandler(BaseHandler, TwitterMixin): @authenticated @gen.coroutine def get(self): timeline = yield self.twitter_request( '/statuses/home_timeline', access_token=self.current_user['access_token']) self.render('home.html', timeline=timeline) class LoginHandler(BaseHandler, TwitterMixin): @gen.coroutine def get(self): if self.get_argument('oauth_token', None): user = yield self.get_authenticated_user() del user["description"] self.set_secure_cookie(self.COOKIE_NAME, json_encode(user)) self.redirect(self.get_argument('next', '/')) else: yield self.authorize_redirect(callback_uri=self.request.full_url()) class LogoutHandler(BaseHandler): def get(self): self.clear_cookie(self.COOKIE_NAME) def main(): parse_command_line(final=False) parse_config_file(options.config_file) app = Application( [ ('/', MainHandler), ('/login', LoginHandler), ('/logout', LogoutHandler), ], login_url='/login', **options.group_dict('application')) app.listen(options.port) logging.info('Listening on http://localhost:%d' % options.port) IOLoop.current().start() if __name__ == '__main__': main() tornado-4.5.3/demos/websocket/000077500000000000000000000000001322420601000162545ustar00rootroot00000000000000tornado-4.5.3/demos/websocket/chatdemo.py000077500000000000000000000061361322420601000204230ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 
2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simplified chat demo for websockets. Authentication, error handling, etc are left as an exercise for the reader :) """ import logging import tornado.escape import tornado.ioloop import tornado.options import tornado.web import tornado.websocket import os.path import uuid from tornado.options import define, options define("port", default=8888, help="run on the given port", type=int) class Application(tornado.web.Application): def __init__(self): handlers = [ (r"/", MainHandler), (r"/chatsocket", ChatSocketHandler), ] settings = dict( cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), xsrf_cookies=True, ) super(Application, self).__init__(handlers, **settings) class MainHandler(tornado.web.RequestHandler): def get(self): self.render("index.html", messages=ChatSocketHandler.cache) class ChatSocketHandler(tornado.websocket.WebSocketHandler): waiters = set() cache = [] cache_size = 200 def get_compression_options(self): # Non-None enables compression with default options. return {} def open(self): ChatSocketHandler.waiters.add(self) def on_close(self): ChatSocketHandler.waiters.remove(self) @classmethod def update_cache(cls, chat): cls.cache.append(chat) if len(cls.cache) > cls.cache_size: cls.cache = cls.cache[-cls.cache_size:] @classmethod def send_updates(cls, chat): logging.info("sending message to %d waiters", len(cls.waiters)) for waiter in cls.waiters: try: waiter.write_message(chat) except: logging.error("Error sending message", exc_info=True) def on_message(self, message): logging.info("got message %r", message) parsed = tornado.escape.json_decode(message) chat = { "id": str(uuid.uuid4()), "body": parsed["body"], } chat["html"] = tornado.escape.to_basestring( self.render_string("message.html", message=chat)) ChatSocketHandler.update_cache(chat) ChatSocketHandler.send_updates(chat) def main(): tornado.options.parse_command_line() app = Application() app.listen(options.port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main() tornado-4.5.3/demos/websocket/static/000077500000000000000000000000001322420601000175435ustar00rootroot00000000000000tornado-4.5.3/demos/websocket/static/chat.css000066400000000000000000000017351322420601000212020ustar00rootroot00000000000000/* * Copyright 2009 FriendFeed * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. 
*/ body { background: white; margin: 10px; } body, input { font-family: sans-serif; font-size: 10pt; color: black; } table { border-collapse: collapse; border: 0; } td { border: 0; padding: 0; } #body { position: absolute; bottom: 10px; left: 10px; } #input { margin-top: 0.5em; } #inbox .message { padding-top: 0.25em; } #nav { float: right; z-index: 99; } tornado-4.5.3/demos/websocket/static/chat.js000066400000000000000000000037351322420601000210300ustar00rootroot00000000000000// Copyright 2009 FriendFeed // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. $(document).ready(function() { if (!window.console) window.console = {}; if (!window.console.log) window.console.log = function() {}; $("#messageform").on("submit", function() { newMessage($(this)); return false; }); $("#messageform").on("keypress", function(e) { if (e.keyCode == 13) { newMessage($(this)); return false; } }); $("#message").select(); updater.start(); }); function newMessage(form) { var message = form.formToDict(); updater.socket.send(JSON.stringify(message)); form.find("input[type=text]").val("").select(); } jQuery.fn.formToDict = function() { var fields = this.serializeArray(); var json = {} for (var i = 0; i < fields.length; i++) { json[fields[i].name] = fields[i].value; } if (json.next) delete json.next; return json; }; var updater = { socket: null, start: function() { var url = "ws://" + location.host + "/chatsocket"; updater.socket = new WebSocket(url); updater.socket.onmessage = function(event) { updater.showMessage(JSON.parse(event.data)); } }, showMessage: function(message) { var existing = $("#m" + message.id); if (existing.length > 0) return; var node = $(message.html); node.hide(); $("#inbox").append(node); node.slideDown(); } }; tornado-4.5.3/demos/websocket/templates/000077500000000000000000000000001322420601000202525ustar00rootroot00000000000000tornado-4.5.3/demos/websocket/templates/index.html000066400000000000000000000021001322420601000222400ustar00rootroot00000000000000 Tornado Chat Demo
{% for message in messages %} {% include "message.html" %} {% end %}
{% module xsrf_form_html() %}
tornado-4.5.3/demos/websocket/templates/message.html000066400000000000000000000001331322420601000225610ustar00rootroot00000000000000
{% module linkify(message["body"]) %}
tornado-4.5.3/demos/webspider/000077500000000000000000000000001322420601000162525ustar00rootroot00000000000000tornado-4.5.3/demos/webspider/webspider.py000066400000000000000000000054231322420601000206140ustar00rootroot00000000000000#!/usr/bin/env python import time from datetime import timedelta try: from HTMLParser import HTMLParser from urlparse import urljoin, urldefrag except ImportError: from html.parser import HTMLParser from urllib.parse import urljoin, urldefrag from tornado import httpclient, gen, ioloop, queues base_url = 'http://www.tornadoweb.org/en/stable/' concurrency = 10 @gen.coroutine def get_links_from_url(url): """Download the page at `url` and parse it for links. Returned links have had the fragment after `#` removed, and have been made absolute so, e.g. the URL 'gen.html#tornado.gen.coroutine' becomes 'http://www.tornadoweb.org/en/stable/gen.html'. """ try: response = yield httpclient.AsyncHTTPClient().fetch(url) print('fetched %s' % url) html = response.body if isinstance(response.body, str) \ else response.body.decode() urls = [urljoin(url, remove_fragment(new_url)) for new_url in get_links(html)] except Exception as e: print('Exception: %s %s' % (e, url)) raise gen.Return([]) raise gen.Return(urls) def remove_fragment(url): pure_url, frag = urldefrag(url) return pure_url def get_links(html): class URLSeeker(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.urls = [] def handle_starttag(self, tag, attrs): href = dict(attrs).get('href') if href and tag == 'a': self.urls.append(href) url_seeker = URLSeeker() url_seeker.feed(html) return url_seeker.urls @gen.coroutine def main(): q = queues.Queue() start = time.time() fetching, fetched = set(), set() @gen.coroutine def fetch_url(): current_url = yield q.get() try: if current_url in fetching: return print('fetching %s' % current_url) fetching.add(current_url) urls = yield get_links_from_url(current_url) fetched.add(current_url) for new_url in urls: # Only follow links beneath the base URL if new_url.startswith(base_url): yield q.put(new_url) finally: q.task_done() @gen.coroutine def worker(): while True: yield fetch_url() q.put(base_url) # Start workers, then wait for the work queue to be empty. for _ in range(concurrency): worker() yield q.join(timeout=timedelta(seconds=300)) assert fetching == fetched print('Done in %d seconds, fetched %s URLs.' % ( time.time() - start, len(fetched))) if __name__ == '__main__': import logging logging.basicConfig() io_loop = ioloop.IOLoop.current() io_loop.run_sync(main) tornado-4.5.3/docs/000077500000000000000000000000001322420601000141075ustar00rootroot00000000000000tornado-4.5.3/docs/Makefile000066400000000000000000000014541322420601000155530ustar00rootroot00000000000000.PHONY: all all: sphinx # No -W for doctests because that disallows tests with empty output. SPHINX_DOCTEST_OPTS=-n -d build/doctress . SPHINXOPTS=-n -W -d build/doctrees . .PHONY: sphinx sphinx: sphinx-build -b html $(SPHINXOPTS) build/html .PHONY: coverage coverage: sphinx-build -b coverage ${SPHINXOPTS} build/coverage cat build/coverage/python.txt .PHONY: latex latex: sphinx-build -b latex $(SPHINXOPTS) build/latex # Building a pdf requires a latex installation. For macports, the needed # packages are texlive-latex-extra and texlive-fonts-recommended. 
# The output is in build/latex/tornado.pdf .PHONY: pdf pdf: latex cd build/latex && pdflatex -interaction=nonstopmode tornado.tex .PHONY: doctest doctest: sphinx-build -b doctest $(SPHINX_DOCTEST_OPTS) build/doctest clean: rm -rf build tornado-4.5.3/docs/asyncio.rst000066400000000000000000000003071322420601000163060ustar00rootroot00000000000000``tornado.platform.asyncio`` --- Bridge between ``asyncio`` and Tornado ======================================================================= .. automodule:: tornado.platform.asyncio :members: tornado-4.5.3/docs/auth.rst000066400000000000000000000022201322420601000155760ustar00rootroot00000000000000``tornado.auth`` --- Third-party login with OpenID and OAuth ============================================================ .. testsetup:: import tornado.auth, tornado.gen, tornado.web .. automodule:: tornado.auth Common protocols ---------------- These classes implement the OpenID and OAuth standards. They will generally need to be subclassed to use them with any particular site. The degree of customization required will vary, but in most cases overridding the class attributes (which are named beginning with underscores for historical reasons) should be sufficient. .. autoclass:: OpenIdMixin :members: .. autoclass:: OAuthMixin .. automethod:: authorize_redirect .. automethod:: get_authenticated_user .. automethod:: _oauth_consumer_token .. automethod:: _oauth_get_user_future .. automethod:: get_auth_http_client .. autoclass:: OAuth2Mixin :members: Google ------ .. autoclass:: GoogleOAuth2Mixin :members: Facebook -------- .. autoclass:: FacebookGraphMixin :members: Twitter ------- .. autoclass:: TwitterMixin :members: tornado-4.5.3/docs/autoreload.rst000066400000000000000000000003111322420601000167730ustar00rootroot00000000000000``tornado.autoreload`` --- Automatically detect code changes in development =========================================================================== .. automodule:: tornado.autoreload :members: tornado-4.5.3/docs/caresresolver.rst000066400000000000000000000014651322420601000175260ustar00rootroot00000000000000``tornado.platform.caresresolver`` --- Asynchronous DNS Resolver using C-Ares ============================================================================= .. module:: tornado.platform.caresresolver This module contains a DNS resolver using the c-ares library (and its wrapper ``pycares``). .. py:class:: CaresResolver Name resolver based on the c-ares library. This is a non-blocking and non-threaded resolver. It may not produce the same results as the system resolver, but can be used for non-blocking resolution when threads cannot be used. c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is the default for ``tornado.simple_httpclient``, but other libraries may default to ``AF_UNSPEC``. tornado-4.5.3/docs/concurrent.rst000066400000000000000000000014561322420601000170310ustar00rootroot00000000000000``tornado.concurrent`` --- Work with threads and futures ======================================================== .. testsetup:: from tornado.concurrent import * from tornado import gen .. automodule:: tornado.concurrent :members: :exclude-members: Future, TracebackFuture .. autoclass:: Future Consumer methods ^^^^^^^^^^^^^^^^ .. automethod:: Future.result .. automethod:: Future.exception .. automethod:: Future.exc_info .. automethod:: Future.add_done_callback .. automethod:: Future.done .. automethod:: Future.running .. 
automethod:: Future.cancel .. automethod:: Future.cancelled Producer methods ^^^^^^^^^^^^^^^^ .. automethod:: Future.set_result .. automethod:: Future.set_exception .. automethod:: Future.set_exc_info tornado-4.5.3/docs/conf.py000066400000000000000000000044401322420601000154100ustar00rootroot00000000000000# Ensure we get the local copy of tornado instead of what's on the standard path import os import sys import time sys.path.insert(0, os.path.abspath("..")) import tornado master_doc = "index" project = "Tornado" copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y") version = release = tornado.version extensions = [ "sphinx.ext.autodoc", "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", ] primary_domain = 'py' default_role = 'py:obj' autodoc_member_order = "bysource" autoclass_content = "both" # Without this line sphinx includes a copy of object.__init__'s docstring # on any class that doesn't define __init__. # https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__ autodoc_docstring_signature = False coverage_skip_undoc_in_source = True coverage_ignore_modules = [ "tornado.platform.asyncio", "tornado.platform.caresresolver", "tornado.platform.twisted", ] # I wish this could go in a per-module file... coverage_ignore_classes = [ # tornado.concurrent "TracebackFuture", # tornado.gen "Runner", # tornado.ioloop "PollIOLoop", # tornado.web "ChunkedTransferEncoding", "GZipContentEncoding", "OutputTransform", "TemplateModule", "url", # tornado.websocket "WebSocketProtocol", "WebSocketProtocol13", "WebSocketProtocol76", ] coverage_ignore_functions = [ # various modules "doctests", "main", # tornado.escape # parse_qs_bytes should probably be documented but it's complicated by # having different implementations between py2 and py3. "parse_qs_bytes", # tornado.gen "Multi", ] html_favicon = 'favicon.ico' latex_documents = [ ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False), ] intersphinx_mapping = { 'python': ('https://docs.python.org/3.5/', None), } on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # On RTD we can't import sphinx_rtd_theme, but it will be applied by # default anyway. This block will use the same theme when building locally # as on RTD. if not on_rtd: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] tornado-4.5.3/docs/coroutine.rst000066400000000000000000000001701322420601000166460ustar00rootroot00000000000000Coroutines and concurrency ========================== .. toctree:: gen concurrent locks queues process tornado-4.5.3/docs/escape.rst000066400000000000000000000021231322420601000160770ustar00rootroot00000000000000``tornado.escape`` --- Escaping and string manipulation ======================================================= .. automodule:: tornado.escape Escaping functions ------------------ .. autofunction:: xhtml_escape .. autofunction:: xhtml_unescape .. autofunction:: url_escape .. autofunction:: url_unescape .. autofunction:: json_encode .. autofunction:: json_decode Byte/unicode conversions ------------------------ These functions are used extensively within Tornado itself, but should not be directly needed by most applications. Note that much of the complexity of these functions comes from the fact that Tornado supports both Python 2 and Python 3. .. autofunction:: utf8 .. autofunction:: to_unicode .. function:: native_str Converts a byte or unicode string into type `str`. 
Equivalent to `utf8` on Python 2 and `to_unicode` on Python 3. .. autofunction:: to_basestring .. autofunction:: recursive_unicode Miscellaneous functions ----------------------- .. autofunction:: linkify .. autofunction:: squeeze tornado-4.5.3/docs/faq.rst000066400000000000000000000073251322420601000154170ustar00rootroot00000000000000Frequently Asked Questions ========================== .. contents:: :local: Why isn't this example with ``time.sleep()`` running in parallel? ----------------------------------------------------------------- Many people's first foray into Tornado's concurrency looks something like this:: class BadExampleHandler(RequestHandler): def get(self): for i in range(5): print(i) time.sleep(1) Fetch this handler twice at the same time and you'll see that the second five-second countdown doesn't start until the first one has completely finished. The reason for this is that `time.sleep` is a **blocking** function: it doesn't allow control to return to the `.IOLoop` so that other handlers can be run. Of course, `time.sleep` is really just a placeholder in these examples, the point is to show what happens when something in a handler gets slow. No matter what the real code is doing, to achieve concurrency blocking code must be replaced with non-blocking equivalents. This means one of three things: 1. *Find a coroutine-friendly equivalent.* For `time.sleep`, use `tornado.gen.sleep` instead:: class CoroutineSleepHandler(RequestHandler): @gen.coroutine def get(self): for i in range(5): print(i) yield gen.sleep(1) When this option is available, it is usually the best approach. See the `Tornado wiki `_ for links to asynchronous libraries that may be useful. 2. *Find a callback-based equivalent.* Similar to the first option, callback-based libraries are available for many tasks, although they are slightly more complicated to use than a library designed for coroutines. These are typically used with `tornado.gen.Task` as an adapter:: class CoroutineTimeoutHandler(RequestHandler): @gen.coroutine def get(self): io_loop = IOLoop.current() for i in range(5): print(i) yield gen.Task(io_loop.add_timeout, io_loop.time() + 1) Again, the `Tornado wiki `_ can be useful to find suitable libraries. 3. *Run the blocking code on another thread.* When asynchronous libraries are not available, `concurrent.futures.ThreadPoolExecutor` can be used to run any blocking code on another thread. This is a universal solution that can be used for any blocking function whether an asynchronous counterpart exists or not:: executor = concurrent.futures.ThreadPoolExecutor(8) class ThreadPoolHandler(RequestHandler): @gen.coroutine def get(self): for i in range(5): print(i) yield executor.submit(time.sleep, 1) See the :doc:`Asynchronous I/O ` chapter of the Tornado user's guide for more on blocking and asynchronous functions. My code is asynchronous, but it's not running in parallel in two browser tabs. ------------------------------------------------------------------------------ Even when a handler is asynchronous and non-blocking, it can be surprisingly tricky to verify this. Browsers will recognize that you are trying to load the same page in two different tabs and delay the second request until the first has finished. To work around this and see that the server is in fact working in parallel, do one of two things: * Add something to your urls to make them unique. Instead of ``http://localhost:8888`` in both tabs, load ``http://localhost:8888/?x=1`` in one and ``http://localhost:8888/?x=2`` in the other. 
* Use two different browsers. For example, Firefox will be able to load a url even while that same url is being loaded in a Chrome tab.

tornado-4.5.3/docs/favicon.ico (binary icon data omitted)

tornado-4.5.3/docs/gen.rst

``tornado.gen`` --- Simplify asynchronous code
==============================================

.. testsetup::

   from tornado.web import *
   from tornado import gen

.. automodule:: tornado.gen

   Decorators
   ----------

   .. autofunction:: coroutine
   .. autofunction:: engine

   Utility functions
   -----------------

   .. autoexception:: Return
   .. autofunction:: with_timeout
   .. autoexception:: TimeoutError
   .. autofunction:: sleep
   .. autodata:: moment
      :annotation:
   .. autoclass:: WaitIterator
      :members:
   .. autofunction:: multi
   .. autofunction:: multi_future
   .. autofunction:: Task

   .. class:: Arguments

      The result of a `Task` or `Wait` whose callback had more than one
      argument (or keyword arguments).

      The `Arguments` object is a `collections.namedtuple` and can be
      used either as a tuple ``(args, kwargs)`` or an object with
      attributes ``args`` and ``kwargs``.

   .. autofunction:: convert_yielded
   .. autofunction:: maybe_future
   .. autofunction:: is_coroutine_function

   Legacy interface
   ----------------

   Before support for `Futures <.Future>` was introduced in Tornado 3.0,
   coroutines used subclasses of `YieldPoint` in their ``yield``
   expressions. These classes are still supported but should generally
   not be used except for compatibility with older interfaces. None of
   these classes are compatible with native (``await``-based) coroutines.

   .. autoclass:: YieldPoint
      :members:

   .. autoclass:: Callback
   .. autoclass:: Wait
   .. autoclass:: WaitAll
   .. autoclass:: MultiYieldPoint

tornado-4.5.3/docs/guide.rst

User's guide
============

.. toctree::

   guide/intro
   guide/async
   guide/coroutines
   guide/queues
   guide/structure
   guide/templates
   guide/security
   guide/running

tornado-4.5.3/docs/guide/

tornado-4.5.3/docs/guide/async.rst

Asynchronous and non-Blocking I/O
---------------------------------

Real-time web features require a long-lived mostly-idle connection per user. In a traditional synchronous web server, this implies devoting one thread to each user, which can be very expensive.

To minimize the cost of concurrent connections, Tornado uses a single-threaded event loop. This means that all application code should aim to be asynchronous and non-blocking because only one operation can be active at a time.
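To make "only one operation at a time" concrete, here is a small self-contained sketch (an illustration, not from the original guide): two coroutines share the one thread, and their output interleaves only because ``gen.sleep`` returns control to the loop. If each worker called the blocking ``time.sleep(1)`` instead, the loop would be stalled and the workers would run one after the other::

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def worker(name):
        for i in range(3):
            print(name, i)
            yield gen.sleep(1)  # non-blocking: lets the other worker run

    @gen.coroutine
    def main():
        # Yielding a list of futures runs both workers concurrently
        # on the single-threaded event loop.
        yield [worker("a"), worker("b")]

    IOLoop.current().run_sync(main)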
The terms asynchronous and non-blocking are closely related and are often used interchangeably, but they are not quite the same thing. Blocking ~~~~~~~~ A function **blocks** when it waits for something to happen before returning. A function may block for many reasons: network I/O, disk I/O, mutexes, etc. In fact, *every* function blocks, at least a little bit, while it is running and using the CPU (for an extreme example that demonstrates why CPU blocking must be taken as seriously as other kinds of blocking, consider password hashing functions like `bcrypt `_, which by design use hundreds of milliseconds of CPU time, far more than a typical network or disk access). A function can be blocking in some respects and non-blocking in others. For example, `tornado.httpclient` in the default configuration blocks on DNS resolution but not on other network access (to mitigate this use `.ThreadedResolver` or a ``tornado.curl_httpclient`` with a properly-configured build of ``libcurl``). In the context of Tornado we generally talk about blocking in the context of network I/O, although all kinds of blocking are to be minimized. Asynchronous ~~~~~~~~~~~~ An **asynchronous** function returns before it is finished, and generally causes some work to happen in the background before triggering some future action in the application (as opposed to normal **synchronous** functions, which do everything they are going to do before returning). There are many styles of asynchronous interfaces: * Callback argument * Return a placeholder (`.Future`, ``Promise``, ``Deferred``) * Deliver to a queue * Callback registry (e.g. POSIX signals) Regardless of which type of interface is used, asynchronous functions *by definition* interact differently with their callers; there is no free way to make a synchronous function asynchronous in a way that is transparent to its callers (systems like `gevent `_ use lightweight threads to offer performance comparable to asynchronous systems, but they do not actually make things asynchronous). Examples ~~~~~~~~ Here is a sample synchronous function: .. testcode:: from tornado.httpclient import HTTPClient def synchronous_fetch(url): http_client = HTTPClient() response = http_client.fetch(url) return response.body .. testoutput:: :hide: And here is the same function rewritten to be asynchronous with a callback argument: .. testcode:: from tornado.httpclient import AsyncHTTPClient def asynchronous_fetch(url, callback): http_client = AsyncHTTPClient() def handle_response(response): callback(response.body) http_client.fetch(url, callback=handle_response) .. testoutput:: :hide: And again with a `.Future` instead of a callback: .. testcode:: from tornado.concurrent import Future def async_fetch_future(url): http_client = AsyncHTTPClient() my_future = Future() fetch_future = http_client.fetch(url) fetch_future.add_done_callback( lambda f: my_future.set_result(f.result())) return my_future .. testoutput:: :hide: The raw `.Future` version is more complex, but ``Futures`` are nonetheless recommended practice in Tornado because they have two major advantages. Error handling is more consistent since the `.Future.result` method can simply raise an exception (as opposed to the ad-hoc error handling common in callback-oriented interfaces), and ``Futures`` lend themselves well to use with coroutines. Coroutines will be discussed in depth in the next section of this guide. Here is the coroutine version of our sample function, which is very similar to the original synchronous version: .. 
testcode:: from tornado import gen @gen.coroutine def fetch_coroutine(url): http_client = AsyncHTTPClient() response = yield http_client.fetch(url) raise gen.Return(response.body) .. testoutput:: :hide: The statement ``raise gen.Return(response.body)`` is an artifact of Python 2, in which generators aren't allowed to return values. To overcome this, Tornado coroutines raise a special kind of exception called a `.Return`. The coroutine catches this exception and treats it like a returned value. In Python 3.3 and later, a ``return response.body`` achieves the same result. tornado-4.5.3/docs/guide/coroutines.rst000066400000000000000000000247641322420601000201450ustar00rootroot00000000000000Coroutines ========== .. testsetup:: from tornado import gen **Coroutines** are the recommended way to write asynchronous code in Tornado. Coroutines use the Python ``yield`` keyword to suspend and resume execution instead of a chain of callbacks (cooperative lightweight threads as seen in frameworks like `gevent `_ are sometimes called coroutines as well, but in Tornado all coroutines use explicit context switches and are called as asynchronous functions). Coroutines are almost as simple as synchronous code, but without the expense of a thread. They also `make concurrency easier `_ to reason about by reducing the number of places where a context switch can happen. Example:: from tornado import gen @gen.coroutine def fetch_coroutine(url): http_client = AsyncHTTPClient() response = yield http_client.fetch(url) # In Python versions prior to 3.3, returning a value from # a generator is not allowed and you must use # raise gen.Return(response.body) # instead. return response.body .. _native_coroutines: Python 3.5: ``async`` and ``await`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Python 3.5 introduces the ``async`` and ``await`` keywords (functions using these keywords are also called "native coroutines"). Starting in Tornado 4.3, you can use them in place of most ``yield``-based coroutines (see the following paragraphs for limitations). Simply use ``async def foo()`` in place of a function definition with the ``@gen.coroutine`` decorator, and ``await`` in place of yield. The rest of this document still uses the ``yield`` style for compatibility with older versions of Python, but ``async`` and ``await`` will run faster when they are available:: async def fetch_coroutine(url): http_client = AsyncHTTPClient() response = await http_client.fetch(url) return response.body The ``await`` keyword is less versatile than the ``yield`` keyword. For example, in a ``yield``-based coroutine you can yield a list of ``Futures``, while in a native coroutine you must wrap the list in `tornado.gen.multi`. This also eliminates the integration with `concurrent.futures`. You can use `tornado.gen.convert_yielded` to convert anything that would work with ``yield`` into a form that will work with ``await``:: async def f(): executor = concurrent.futures.ThreadPoolExecutor() await tornado.gen.convert_yielded(executor.submit(g)) While native coroutines are not visibly tied to a particular framework (i.e. they do not use a decorator like `tornado.gen.coroutine` or `asyncio.coroutine`), not all coroutines are compatible with each other. There is a *coroutine runner* which is selected by the first coroutine to be called, and then shared by all coroutines which are called directly with ``await``. 
The Tornado coroutine runner is designed to be versatile and accept awaitable objects from any framework; other coroutine runners may be more limited (for example, the ``asyncio`` coroutine runner does not accept coroutines from other frameworks). For this reason, it is recommended to use the Tornado coroutine runner for any application which combines multiple frameworks. To call a coroutine using the Tornado runner from within a coroutine that is already using the asyncio runner, use the `tornado.platform.asyncio.to_asyncio_future` adapter. How it works ~~~~~~~~~~~~ A function containing ``yield`` is a **generator**. All generators are asynchronous; when called they return a generator object instead of running to completion. The ``@gen.coroutine`` decorator communicates with the generator via the ``yield`` expressions, and with the coroutine's caller by returning a `.Future`. Here is a simplified version of the coroutine decorator's inner loop:: # Simplified inner loop of tornado.gen.Runner def run(self): # send(x) makes the current yield return x. # It returns when the next yield is reached future = self.gen.send(self.next) def callback(f): self.next = f.result() self.run() future.add_done_callback(callback) The decorator receives a `.Future` from the generator, waits (without blocking) for that `.Future` to complete, then "unwraps" the `.Future` and sends the result back into the generator as the result of the ``yield`` expression. Most asynchronous code never touches the `.Future` class directly except to immediately pass the `.Future` returned by an asynchronous function to a ``yield`` expression. How to call a coroutine ~~~~~~~~~~~~~~~~~~~~~~~ Coroutines do not raise exceptions in the normal way: any exception they raise will be trapped in the `.Future` until it is yielded. This means it is important to call coroutines in the right way, or you may have errors that go unnoticed:: @gen.coroutine def divide(x, y): return x / y def bad_call(): # This should raise a ZeroDivisionError, but it won't because # the coroutine is called incorrectly. divide(1, 0) In nearly all cases, any function that calls a coroutine must be a coroutine itself, and use the ``yield`` keyword in the call. When you are overriding a method defined in a superclass, consult the documentation to see if coroutines are allowed (the documentation should say that the method "may be a coroutine" or "may return a `.Future`"):: @gen.coroutine def good_call(): # yield will unwrap the Future returned by divide() and raise # the exception. yield divide(1, 0) Sometimes you may want to "fire and forget" a coroutine without waiting for its result. In this case it is recommended to use `.IOLoop.spawn_callback`, which makes the `.IOLoop` responsible for the call. If it fails, the `.IOLoop` will log a stack trace:: # The IOLoop will catch the exception and print a stack trace in # the logs. Note that this doesn't look like a normal call, since # we pass the function object to be called by the IOLoop. IOLoop.current().spawn_callback(divide, 1, 0) Using `.IOLoop.spawn_callback` in this way is *recommended* for functions using ``@gen.coroutine``, but it is *required* for functions using ``async def`` (otherwise the coroutine runner will not start). Finally, at the top level of a program, *if the IOLoop is not yet running,* you can start the `.IOLoop`, run the coroutine, and then stop the `.IOLoop` with the `.IOLoop.run_sync` method. 
This is often used to start the ``main`` function of a batch-oriented program:: # run_sync() doesn't take arguments, so we must wrap the # call in a lambda. IOLoop.current().run_sync(lambda: divide(1, 0)) Coroutine patterns ~~~~~~~~~~~~~~~~~~ Interaction with callbacks ^^^^^^^^^^^^^^^^^^^^^^^^^^ To interact with asynchronous code that uses callbacks instead of `.Future`, wrap the call in a `.Task`. This will add the callback argument for you and return a `.Future` which you can yield: .. testcode:: @gen.coroutine def call_task(): # Note that there are no parens on some_function. # This will be translated by Task into # some_function(other_args, callback=callback) yield gen.Task(some_function, other_args) .. testoutput:: :hide: Calling blocking functions ^^^^^^^^^^^^^^^^^^^^^^^^^^ The simplest way to call a blocking function from a coroutine is to use a `~concurrent.futures.ThreadPoolExecutor`, which returns ``Futures`` that are compatible with coroutines:: thread_pool = ThreadPoolExecutor(4) @gen.coroutine def call_blocking(): yield thread_pool.submit(blocking_func, args) Parallelism ^^^^^^^^^^^ The coroutine decorator recognizes lists and dicts whose values are ``Futures``, and waits for all of those ``Futures`` in parallel: .. testcode:: @gen.coroutine def parallel_fetch(url1, url2): resp1, resp2 = yield [http_client.fetch(url1), http_client.fetch(url2)] @gen.coroutine def parallel_fetch_many(urls): responses = yield [http_client.fetch(url) for url in urls] # responses is a list of HTTPResponses in the same order @gen.coroutine def parallel_fetch_dict(urls): responses = yield {url: http_client.fetch(url) for url in urls} # responses is a dict {url: HTTPResponse} .. testoutput:: :hide: Interleaving ^^^^^^^^^^^^ Sometimes it is useful to save a `.Future` instead of yielding it immediately, so you can start another operation before waiting: .. testcode:: @gen.coroutine def get(self): fetch_future = self.fetch_next_chunk() while True: chunk = yield fetch_future if chunk is None: break self.write(chunk) fetch_future = self.fetch_next_chunk() yield self.flush() .. testoutput:: :hide: This pattern is most usable with ``@gen.coroutine``. If ``fetch_next_chunk()`` uses ``async def``, then it must be called as ``fetch_future = tornado.gen.convert_yielded(self.fetch_next_chunk())`` to start the background processing. Looping ^^^^^^^ Looping is tricky with coroutines since there is no way in Python to ``yield`` on every iteration of a ``for`` or ``while`` loop and capture the result of the yield. Instead, you'll need to separate the loop condition from accessing the results, as in this example from `Motor `_:: import motor db = motor.MotorClient().test @gen.coroutine def loop_example(collection): cursor = db.collection.find() while (yield cursor.fetch_next): doc = cursor.next_object() Running in the background ^^^^^^^^^^^^^^^^^^^^^^^^^ `.PeriodicCallback` is not normally used with coroutines. Instead, a coroutine can contain a ``while True:`` loop and use `tornado.gen.sleep`:: @gen.coroutine def minute_loop(): while True: yield do_something() yield gen.sleep(60) # Coroutines that loop forever are generally started with # spawn_callback(). IOLoop.current().spawn_callback(minute_loop) Sometimes a more complicated loop may be desirable. For example, the previous loop runs every ``60+N`` seconds, where ``N`` is the running time of ``do_something()``. 
To run exactly every 60 seconds, use the interleaving pattern from above::

    @gen.coroutine
    def minute_loop2():
        while True:
            nxt = gen.sleep(60)   # Start the clock.
            yield do_something()  # Run while the clock is ticking.
            yield nxt             # Wait for the timer to run out.

tornado-4.5.3/docs/guide/intro.rst

Introduction
------------

`Tornado <http://www.tornadoweb.org>`_ is a Python web framework and asynchronous networking library, originally developed at `FriendFeed <http://friendfeed.com>`_. By using non-blocking network I/O, Tornado can scale to tens of thousands of open connections, making it ideal for `long polling <http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_, `WebSockets <http://en.wikipedia.org/wiki/WebSocket>`_, and other applications that require a long-lived connection to each user.

Tornado can be roughly divided into four major components:

* A web framework (including `.RequestHandler` which is subclassed to create web applications, and various supporting classes).
* Client- and server-side implementations of HTTP (`.HTTPServer` and `.AsyncHTTPClient`).
* An asynchronous networking library including the classes `.IOLoop` and `.IOStream`, which serve as the building blocks for the HTTP components and can also be used to implement other protocols.
* A coroutine library (`tornado.gen`) which allows asynchronous code to be written in a more straightforward way than chaining callbacks.

The Tornado web framework and HTTP server together offer a full-stack alternative to `WSGI <http://www.python.org/dev/peps/pep-3333/>`_. While it is possible to use the Tornado web framework in a WSGI container (`.WSGIAdapter`), or use the Tornado HTTP server as a container for other WSGI frameworks (`.WSGIContainer`), each of these combinations has limitations, and to take full advantage of Tornado you will need to use Tornado's web framework and HTTP server together.

tornado-4.5.3/docs/guide/queues.rst

:class:`~tornado.queues.Queue` example - a concurrent web spider
================================================================

.. currentmodule:: tornado.queues

Tornado's `tornado.queues` module implements an asynchronous producer / consumer pattern for coroutines, analogous to the pattern implemented for threads by the Python standard library's `queue` module.

A coroutine that yields `Queue.get` pauses until there is an item in the queue. If the queue has a maximum size set, a coroutine that yields `Queue.put` pauses until there is room for another item.

A `~Queue` maintains a count of unfinished tasks, which begins at zero. `~Queue.put` increments the count; `~Queue.task_done` decrements it.

In the web-spider example here, the queue begins containing only ``base_url``. When a worker fetches a page it parses the links and puts new ones in the queue, then calls `~Queue.task_done` to decrement the counter once. Eventually, a worker fetches a page whose URLs have all been seen before, and there is also no work left in the queue. Thus that worker's call to `~Queue.task_done` decrements the counter to zero. The main coroutine, which is waiting for `~Queue.join`, is unpaused and finishes.

.. literalinclude:: ../../demos/webspider/webspider.py

tornado-4.5.3/docs/guide/running.rst

Running and deploying
=====================

Since Tornado supplies its own HTTPServer, running and deploying it is a little different from other Python web frameworks.
Instead of configuring a WSGI container to find your application, you write a ``main()`` function that starts the server:

.. testcode::

    def main():
        app = make_app()
        app.listen(8888)
        IOLoop.current().start()

    if __name__ == '__main__':
        main()

.. testoutput::
   :hide:

Configure your operating system or process manager to run this program to start the server. Please note that it may be necessary to increase the number of open files per process (to avoid a "Too many open files" error). To raise this limit (setting it to 50000, for example) you can use the ``ulimit`` command, modify ``/etc/security/limits.conf``, or set ``minfds`` in your supervisord config.

Processes and ports
~~~~~~~~~~~~~~~~~~~

Due to the Python GIL (Global Interpreter Lock), it is necessary to run multiple Python processes to take full advantage of multi-CPU machines. Typically it is best to run one process per CPU.

Tornado includes a built-in multi-process mode to start several processes at once. This requires a slight alteration to the standard main function:

.. testcode::

    def main():
        app = make_app()
        server = tornado.httpserver.HTTPServer(app)
        server.bind(8888)
        server.start(0)  # forks one process per cpu
        IOLoop.current().start()

.. testoutput::
   :hide:

This is the easiest way to start multiple processes and have them all share the same port, although it has some limitations. First, each child process will have its own IOLoop, so it is important that nothing touch the global IOLoop instance (even indirectly) before the fork. Second, it is difficult to do zero-downtime updates in this model. Finally, since all the processes share the same port it is more difficult to monitor them individually.

For more sophisticated deployments, it is recommended to start the processes independently, and have each one listen on a different port; the "process groups" feature of `supervisord <http://www.supervisord.org>`_ is one good way to arrange this, as sketched below. When each process uses a different port, an external load balancer such as HAProxy or nginx is usually needed to present a single address to outside visitors.
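For example, a minimal sketch (not from the original docs; ``make_app`` is the hypothetical application factory used earlier in this chapter) that lets each process take its port from the command line via `tornado.options`::

    from tornado.ioloop import IOLoop
    from tornado.options import define, options, parse_command_line

    define("port", default=8000, help="port to listen on")

    def main():
        parse_command_line()
        app = make_app()  # hypothetical application factory
        app.listen(options.port)
        IOLoop.current().start()

    if __name__ == '__main__':
        main()

A process manager can then start one copy per port (``python server.py --port=8000`` through ``--port=8003``), and the load balancer described below spreads traffic across those four ports.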
Running behind a load balancer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

When running behind a load balancer like nginx, it is recommended to pass ``xheaders=True`` to the `.HTTPServer` constructor. This will tell Tornado to use headers like ``X-Real-IP`` to get the user's IP address instead of attributing all traffic to the balancer's IP address.

This is a barebones nginx config file that is structurally similar to the one we use at FriendFeed. It assumes nginx and the Tornado servers are running on the same machine, and the four Tornado servers are running on ports 8000 - 8003::

    user nginx;
    worker_processes 1;

    error_log /var/log/nginx/error.log;
    pid /var/run/nginx.pid;

    events {
        worker_connections 1024;
        use epoll;
    }

    http {
        # Enumerate all the Tornado servers here
        upstream frontends {
            server 127.0.0.1:8000;
            server 127.0.0.1:8001;
            server 127.0.0.1:8002;
            server 127.0.0.1:8003;
        }

        include /etc/nginx/mime.types;
        default_type application/octet-stream;

        access_log /var/log/nginx/access.log;

        keepalive_timeout 65;
        proxy_read_timeout 200;
        sendfile on;
        tcp_nopush on;
        tcp_nodelay on;
        gzip on;
        gzip_min_length 1000;
        gzip_proxied any;
        gzip_types text/plain text/html text/css text/xml
                   application/x-javascript application/xml
                   application/atom+xml text/javascript;

        # Only retry if there was a communication error, not a timeout
        # on the Tornado server (to avoid propagating "queries of death"
        # to all frontends)
        proxy_next_upstream error;

        server {
            listen 80;

            # Allow file uploads
            client_max_body_size 50M;

            location ^~ /static/ {
                root /var/www;
                if ($query_string) {
                    expires max;
                }
            }
            location = /favicon.ico {
                rewrite (.*) /static/favicon.ico;
            }
            location = /robots.txt {
                rewrite (.*) /static/robots.txt;
            }

            location / {
                proxy_pass_header Server;
                proxy_set_header Host $http_host;
                proxy_redirect off;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header X-Scheme $scheme;
                proxy_pass http://frontends;
            }
        }
    }

Static files and aggressive file caching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can serve static files from Tornado by specifying the ``static_path`` setting in your application::

    settings = {
        "static_path": os.path.join(os.path.dirname(__file__), "static"),
        "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
        "login_url": "/login",
        "xsrf_cookies": True,
    }
    application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/login", LoginHandler),
        (r"/(apple-touch-icon\.png)", tornado.web.StaticFileHandler,
         dict(path=settings['static_path'])),
    ], **settings)

This setting will automatically make all requests that start with ``/static/`` serve from that static directory, e.g., ``http://localhost:8888/static/foo.png`` will serve the file ``foo.png`` from the specified static directory. We also automatically serve ``/robots.txt`` and ``/favicon.ico`` from the static directory (even though they don't start with the ``/static/`` prefix).

In the above settings, we have explicitly configured Tornado to serve ``apple-touch-icon.png`` from the root with the `.StaticFileHandler`, though it is physically in the static file directory. (The capturing group in that regular expression is necessary to tell `.StaticFileHandler` the requested filename; recall that capturing groups are passed to handlers as method arguments.) You could do the same thing to serve e.g. ``sitemap.xml`` from the site root. Of course, you can also avoid faking a root ``apple-touch-icon.png`` by using the appropriate ``<link />`` tag in your HTML.

To improve performance, it is generally a good idea for browsers to cache static resources aggressively so browsers won't send unnecessary ``If-Modified-Since`` or ``Etag`` requests that might block the rendering of the page. Tornado supports this out of the box with *static content versioning*.

To use this feature, use the `~.RequestHandler.static_url` method in your templates rather than typing the URL of the static file directly in your HTML::

    <html>
       <head>
          <title>FriendFeed - {{ _("Home") }}</title>
       </head>
       <body>
         <div><img src="{{ static_url("images/logo.png") }}"/></div>
       </body>
     </html>
The ``static_url()`` function will translate that relative path to a URI that looks like ``/static/images/logo.png?v=aae54``. The ``v`` argument is a hash of the content in ``logo.png``, and its presence makes the Tornado server send cache headers to the user's browser that will make the browser cache the content indefinitely. Since the ``v`` argument is based on the content of the file, if you update a file and restart your server, it will start sending a new ``v`` value, so the user's browser will automatically fetch the new file. If the file's contents don't change, the browser will continue to use a locally cached copy without ever checking for updates on the server, significantly improving rendering performance. In production, you probably want to serve static files from a more optimized static file server like `nginx `_. You can configure most any web server to recognize the version tags used by ``static_url()`` and set caching headers accordingly. Here is the relevant portion of the nginx configuration we use at FriendFeed:: location /static/ { root /var/friendfeed/static; if ($query_string) { expires max; } } .. _debug-mode: Debug mode and automatic reloading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you pass ``debug=True`` to the ``Application`` constructor, the app will be run in debug/development mode. In this mode, several features intended for convenience while developing will be enabled (each of which is also available as an individual flag; if both are specified the individual flag takes precedence): * ``autoreload=True``: The app will watch for changes to its source files and reload itself when anything changes. This reduces the need to manually restart the server during development. However, certain failures (such as syntax errors at import time) can still take the server down in a way that debug mode cannot currently recover from. * ``compiled_template_cache=False``: Templates will not be cached. * ``static_hash_cache=False``: Static file hashes (used by the ``static_url`` function) will not be cached * ``serve_traceback=True``: When an exception in a `.RequestHandler` is not caught, an error page including a stack trace will be generated. Autoreload mode is not compatible with the multi-process mode of `.HTTPServer`. You must not give `HTTPServer.start <.TCPServer.start>` an argument other than 1 (or call `tornado.process.fork_processes`) if you are using autoreload mode. The automatic reloading feature of debug mode is available as a standalone module in `tornado.autoreload`. The two can be used in combination to provide extra robustness against syntax errors: set ``autoreload=True`` within the app to detect changes while it is running, and start it with ``python -m tornado.autoreload myserver.py`` to catch any syntax errors or other errors at startup. Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) because it re-executes Python using `sys.executable` and `sys.argv`. Additionally, modifying these variables will cause reloading to behave incorrectly. On some platforms (including Windows and Mac OSX prior to 10.6), the process cannot be updated "in-place", so when a code change is detected the old server exits and a new one starts. This has been known to confuse some IDEs. WSGI and Google App Engine ~~~~~~~~~~~~~~~~~~~~~~~~~~ Tornado is normally intended to be run on its own, without a WSGI container. However, in some environments (such as Google App Engine), only WSGI is allowed and applications cannot run their own servers. 
In this case Tornado supports a limited mode of operation that does not support asynchronous operation but allows a subset of Tornado's functionality in a WSGI-only environment. The features that are not allowed in WSGI mode include coroutines, the ``@asynchronous`` decorator, `.AsyncHTTPClient`, the ``auth`` module, and WebSockets. You can convert a Tornado `.Application` to a WSGI application with `tornado.wsgi.WSGIAdapter`. In this example, configure your WSGI container to find the ``application`` object: .. testcode:: import tornado.web import tornado.wsgi class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") tornado_app = tornado.web.Application([ (r"/", MainHandler), ]) application = tornado.wsgi.WSGIAdapter(tornado_app) .. testoutput:: :hide: See the `appengine example application `_ for a full-featured AppEngine app built on Tornado. tornado-4.5.3/docs/guide/security.rst000066400000000000000000000256101322420601000176110ustar00rootroot00000000000000Authentication and security =========================== .. testsetup:: import tornado.web Cookies and secure cookies ~~~~~~~~~~~~~~~~~~~~~~~~~~ You can set cookies in the user's browser with the ``set_cookie`` method: .. testcode:: class MainHandler(tornado.web.RequestHandler): def get(self): if not self.get_cookie("mycookie"): self.set_cookie("mycookie", "myvalue") self.write("Your cookie was not set yet!") else: self.write("Your cookie was set!") .. testoutput:: :hide: Cookies are not secure and can easily be modified by clients. If you need to set cookies to, e.g., identify the currently logged in user, you need to sign your cookies to prevent forgery. Tornado supports signed cookies with the `~.RequestHandler.set_secure_cookie` and `~.RequestHandler.get_secure_cookie` methods. To use these methods, you need to specify a secret key named ``cookie_secret`` when you create your application. You can pass in application settings as keyword arguments to your application: .. testcode:: application = tornado.web.Application([ (r"/", MainHandler), ], cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__") .. testoutput:: :hide: Signed cookies contain the encoded value of the cookie in addition to a timestamp and an `HMAC `_ signature. If the cookie is old or if the signature doesn't match, ``get_secure_cookie`` will return ``None`` just as if the cookie isn't set. The secure version of the example above: .. testcode:: class MainHandler(tornado.web.RequestHandler): def get(self): if not self.get_secure_cookie("mycookie"): self.set_secure_cookie("mycookie", "myvalue") self.write("Your cookie was not set yet!") else: self.write("Your cookie was set!") .. testoutput:: :hide: Tornado's secure cookies guarantee integrity but not confidentiality. That is, the cookie cannot be modified but its contents can be seen by the user. The ``cookie_secret`` is a symmetric key and must be kept secret -- anyone who obtains the value of this key could produce their own signed cookies. By default, Tornado's secure cookies expire after 30 days. To change this, use the ``expires_days`` keyword argument to ``set_secure_cookie`` *and* the ``max_age_days`` argument to ``get_secure_cookie``. These two values are passed separately so that you may e.g. have a cookie that is valid for 30 days for most purposes, but for certain sensitive actions (such as changing billing information) you use a smaller ``max_age_days`` when reading the cookie. Tornado also supports multiple signing keys to enable signing key rotation. 
``cookie_secret`` then must be a dict with integer key versions as keys and the corresponding secrets as values. The currently used signing key must then be set as ``key_version`` application setting but all other keys in the dict are allowed for cookie signature validation, if the correct key version is set in the cookie. To implement cookie updates, the current signing key version can be queried via `~.RequestHandler.get_secure_cookie_key_version`. .. _user-authentication: User authentication ~~~~~~~~~~~~~~~~~~~ The currently authenticated user is available in every request handler as `self.current_user <.RequestHandler.current_user>`, and in every template as ``current_user``. By default, ``current_user`` is ``None``. To implement user authentication in your application, you need to override the ``get_current_user()`` method in your request handlers to determine the current user based on, e.g., the value of a cookie. Here is an example that lets users log into the application simply by specifying a nickname, which is then saved in a cookie: .. testcode:: class BaseHandler(tornado.web.RequestHandler): def get_current_user(self): return self.get_secure_cookie("user") class MainHandler(BaseHandler): def get(self): if not self.current_user: self.redirect("/login") return name = tornado.escape.xhtml_escape(self.current_user) self.write("Hello, " + name) class LoginHandler(BaseHandler): def get(self): self.write('
<html><body><form action="/login" method="post">'
                   'Name: <input type="text" name="name">'
                   '<input type="submit" value="Sign in">'
                   '</form></body></html>
') def post(self): self.set_secure_cookie("user", self.get_argument("name")) self.redirect("/") application = tornado.web.Application([ (r"/", MainHandler), (r"/login", LoginHandler), ], cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__") .. testoutput:: :hide: You can require that the user be logged in using the `Python decorator `_ `tornado.web.authenticated`. If a request goes to a method with this decorator, and the user is not logged in, they will be redirected to ``login_url`` (another application setting). The example above could be rewritten: .. testcode:: class MainHandler(BaseHandler): @tornado.web.authenticated def get(self): name = tornado.escape.xhtml_escape(self.current_user) self.write("Hello, " + name) settings = { "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", "login_url": "/login", } application = tornado.web.Application([ (r"/", MainHandler), (r"/login", LoginHandler), ], **settings) .. testoutput:: :hide: If you decorate ``post()`` methods with the ``authenticated`` decorator, and the user is not logged in, the server will send a ``403`` response. The ``@authenticated`` decorator is simply shorthand for ``if not self.current_user: self.redirect()`` and may not be appropriate for non-browser-based login schemes. Check out the `Tornado Blog example application `_ for a complete example that uses authentication (and stores user data in a MySQL database). Third party authentication ~~~~~~~~~~~~~~~~~~~~~~~~~~ The `tornado.auth` module implements the authentication and authorization protocols for a number of the most popular sites on the web, including Google/Gmail, Facebook, Twitter, and FriendFeed. The module includes methods to log users in via these sites and, where applicable, methods to authorize access to the service so you can, e.g., download a user's address book or publish a Twitter message on their behalf. Here is an example handler that uses Google for authentication, saving the Google credentials in a cookie for later access: .. testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): @tornado.gen.coroutine def get(self): if self.get_argument('code', False): user = yield self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) # Save the user with e.g. set_secure_cookie else: yield self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: See the `tornado.auth` module documentation for more details. .. _xsrf: Cross-site request forgery protection ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Cross-site request forgery `_, or XSRF, is a common problem for personalized web applications. See the `Wikipedia article `_ for more information on how XSRF works. The generally accepted solution to prevent XSRF is to cookie every user with an unpredictable value and include that value as an additional argument with every form submission on your site. If the cookie and the value in the form submission do not match, then the request is likely forged. Tornado comes with built-in XSRF protection. To include it in your site, include the application setting ``xsrf_cookies``: .. 
testcode:: settings = { "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__", "login_url": "/login", "xsrf_cookies": True, } application = tornado.web.Application([ (r"/", MainHandler), (r"/login", LoginHandler), ], **settings) .. testoutput:: :hide: If ``xsrf_cookies`` is set, the Tornado web application will set the ``_xsrf`` cookie for all users and reject all ``POST``, ``PUT``, and ``DELETE`` requests that do not contain a correct ``_xsrf`` value. If you turn this setting on, you need to instrument all forms that submit via ``POST`` to contain this field. You can do this with the special `.UIModule` ``xsrf_form_html()``, available in all templates::
    <form action="/new_message" method="post">
      {% module xsrf_form_html() %}
      <input type="text" name="message"/>
      <input type="submit" value="Post"/>
    </form>
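The handler that receives this form needs no XSRF-specific code of its own; when ``xsrf_cookies`` is set, Tornado validates the token before the handler method runs. A minimal sketch (``MessageHandler`` is a hypothetical name)::

    import tornado.escape
    import tornado.web

    class MessageHandler(tornado.web.RequestHandler):
        def post(self):
            # A missing or incorrect _xsrf value has already been
            # rejected with a 403 response before post() is called.
            message = self.get_body_argument("message")
            self.write(tornado.escape.xhtml_escape(message))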
If you submit AJAX ``POST`` requests, you will also need to instrument your JavaScript to include the ``_xsrf`` value with each request. This is the `jQuery `_ function we use at FriendFeed for AJAX ``POST`` requests that automatically adds the ``_xsrf`` value to all requests:: function getCookie(name) { var r = document.cookie.match("\\b" + name + "=([^;]*)\\b"); return r ? r[1] : undefined; } jQuery.postJSON = function(url, args, callback) { args._xsrf = getCookie("_xsrf"); $.ajax({url: url, data: $.param(args), dataType: "text", type: "POST", success: function(response) { callback(eval("(" + response + ")")); }}); }; For ``PUT`` and ``DELETE`` requests (as well as ``POST`` requests that do not use form-encoded arguments), the XSRF token may also be passed via an HTTP header named ``X-XSRFToken``. The XSRF cookie is normally set when ``xsrf_form_html`` is used, but in a pure-Javascript application that does not use any regular forms you may need to access ``self.xsrf_token`` manually (just reading the property is enough to set the cookie as a side effect). If you need to customize XSRF behavior on a per-handler basis, you can override `.RequestHandler.check_xsrf_cookie()`. For example, if you have an API whose authentication does not use cookies, you may want to disable XSRF protection by making ``check_xsrf_cookie()`` do nothing. However, if you support both cookie and non-cookie-based authentication, it is important that XSRF protection be used whenever the current request is authenticated with a cookie. tornado-4.5.3/docs/guide/structure.rst000066400000000000000000000360311322420601000200010ustar00rootroot00000000000000.. currentmodule:: tornado.web .. testsetup:: import tornado.web Structure of a Tornado web application ====================================== A Tornado web application generally consists of one or more `.RequestHandler` subclasses, an `.Application` object which routes incoming requests to handlers, and a ``main()`` function to start the server. A minimal "hello world" example looks something like this: .. testcode:: import tornado.ioloop import tornado.web class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") def make_app(): return tornado.web.Application([ (r"/", MainHandler), ]) if __name__ == "__main__": app = make_app() app.listen(8888) tornado.ioloop.IOLoop.current().start() .. testoutput:: :hide: The ``Application`` object ~~~~~~~~~~~~~~~~~~~~~~~~~~ The `.Application` object is responsible for global configuration, including the routing table that maps requests to handlers. The routing table is a list of `.URLSpec` objects (or tuples), each of which contains (at least) a regular expression and a handler class. Order matters; the first matching rule is used. If the regular expression contains capturing groups, these groups are the *path arguments* and will be passed to the handler's HTTP method. If a dictionary is passed as the third element of the `.URLSpec`, it supplies the *initialization arguments* which will be passed to `.RequestHandler.initialize`. Finally, the `.URLSpec` may have a name, which will allow it to be used with `.RequestHandler.reverse_url`. For example, in this fragment the root URL ``/`` is mapped to ``MainHandler`` and URLs of the form ``/story/`` followed by a number are mapped to ``StoryHandler``. That number is passed (as a string) to ``StoryHandler.get``. 
:: class MainHandler(RequestHandler): def get(self): self.write('link to story 1' % self.reverse_url("story", "1")) class StoryHandler(RequestHandler): def initialize(self, db): self.db = db def get(self, story_id): self.write("this is story %s" % story_id) app = Application([ url(r"/", MainHandler), url(r"/story/([0-9]+)", StoryHandler, dict(db=db), name="story") ]) The `.Application` constructor takes many keyword arguments that can be used to customize the behavior of the application and enable optional features; see `.Application.settings` for the complete list. Subclassing ``RequestHandler`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most of the work of a Tornado web application is done in subclasses of `.RequestHandler`. The main entry point for a handler subclass is a method named after the HTTP method being handled: ``get()``, ``post()``, etc. Each handler may define one or more of these methods to handle different HTTP actions. As described above, these methods will be called with arguments corresponding to the capturing groups of the routing rule that matched. Within a handler, call methods such as `.RequestHandler.render` or `.RequestHandler.write` to produce a response. ``render()`` loads a `.Template` by name and renders it with the given arguments. ``write()`` is used for non-template-based output; it accepts strings, bytes, and dictionaries (dicts will be encoded as JSON). Many methods in `.RequestHandler` are designed to be overridden in subclasses and be used throughout the application. It is common to define a ``BaseHandler`` class that overrides methods such as `~.RequestHandler.write_error` and `~.RequestHandler.get_current_user` and then subclass your own ``BaseHandler`` instead of `.RequestHandler` for all your specific handlers. Handling request input ~~~~~~~~~~~~~~~~~~~~~~ The request handler can access the object representing the current request with ``self.request``. See the class definition for `~tornado.httputil.HTTPServerRequest` for a complete list of attributes. Request data in the formats used by HTML forms will be parsed for you and is made available in methods like `~.RequestHandler.get_query_argument` and `~.RequestHandler.get_body_argument`. .. testcode:: class MyFormHandler(tornado.web.RequestHandler): def get(self): self.write('
<html><body><form action="/myform" method="post">'
                   '<input type="text" name="message">'
                   '<input type="submit" value="Submit">'
                   '</form></body></html>')

        def post(self):
            self.set_header("Content-Type", "text/plain")
            self.write("You wrote " + self.get_body_argument("message"))

.. testoutput::
   :hide:

Since the HTML form encoding is ambiguous as to whether an argument is a single value or a list with one element, `.RequestHandler` has distinct methods to allow the application to indicate whether or not it expects a list. For lists, use `~.RequestHandler.get_query_arguments` and `~.RequestHandler.get_body_arguments` instead of their singular counterparts.

Files uploaded via a form are available in ``self.request.files``, which maps names (the name of the HTML ``<input type="file">`` element) to a list of files. Each file is a dictionary of the form ``{"filename":..., "content_type":..., "body":...}``. The ``files`` object is only present if the files were uploaded with a form wrapper (i.e. a ``multipart/form-data`` Content-Type); if this format was not used the raw uploaded data is available in ``self.request.body``. By default uploaded files are fully buffered in memory; if you need to handle files that are too large to comfortably keep in memory see the `.stream_request_body` class decorator.

In the demos directory, ``file_receiver.py`` shows both methods of receiving file uploads.

Due to the quirks of the HTML form encoding (e.g. the ambiguity around singular versus plural arguments), Tornado does not attempt to unify form arguments with other types of input. In particular, we do not parse JSON request bodies. Applications that wish to use JSON instead of form-encoding may override `~.RequestHandler.prepare` to parse their requests::

    def prepare(self):
        if self.request.headers["Content-Type"].startswith("application/json"):
            self.json_args = json.loads(self.request.body)
        else:
            self.json_args = None

Overriding RequestHandler methods
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In addition to ``get()``/``post()``/etc, certain other methods in `.RequestHandler` are designed to be overridden by subclasses when necessary. On every request, the following sequence of calls takes place:

1. A new `.RequestHandler` object is created on each request.
2. `~.RequestHandler.initialize()` is called with the initialization arguments from the `.Application` configuration. ``initialize`` should typically just save the arguments passed into member variables; it may not produce any output or call methods like `~.RequestHandler.send_error`.
3. `~.RequestHandler.prepare()` is called. This is most useful in a base class shared by all of your handler subclasses, as ``prepare`` is called no matter which HTTP method is used. ``prepare`` may produce output; if it calls `~.RequestHandler.finish` (or ``redirect``, etc), processing stops here.
4. One of the HTTP methods is called: ``get()``, ``post()``, ``put()``, etc. If the URL regular expression contains capturing groups, they are passed as arguments to this method.
5. When the request is finished, `~.RequestHandler.on_finish()` is called. For synchronous handlers this is immediately after ``get()`` (etc) return; for asynchronous handlers it is after the call to `~.RequestHandler.finish()`.

All methods designed to be overridden are noted as such in the `.RequestHandler` documentation. Some of the most commonly overridden methods include:

- `~.RequestHandler.write_error` - outputs HTML for use on error pages.
- `~.RequestHandler.on_connection_close` - called when the client disconnects; applications may choose to detect this case and halt further processing. Note that there is no guarantee that a closed connection can be detected promptly.
- `~.RequestHandler.get_current_user` - see :ref:`user-authentication` - `~.RequestHandler.get_user_locale` - returns `.Locale` object to use for the current user - `~.RequestHandler.set_default_headers` - may be used to set additional headers on the response (such as a custom ``Server`` header) Error Handling ~~~~~~~~~~~~~~ If a handler raises an exception, Tornado will call `.RequestHandler.write_error` to generate an error page. `tornado.web.HTTPError` can be used to generate a specified status code; all other exceptions return a 500 status. The default error page includes a stack trace in debug mode and a one-line description of the error (e.g. "500: Internal Server Error") otherwise. To produce a custom error page, override `RequestHandler.write_error` (probably in a base class shared by all your handlers). This method may produce output normally via methods such as `~RequestHandler.write` and `~RequestHandler.render`. If the error was caused by an exception, an ``exc_info`` triple will be passed as a keyword argument (note that this exception is not guaranteed to be the current exception in `sys.exc_info`, so ``write_error`` must use e.g. `traceback.format_exception` instead of `traceback.format_exc`). It is also possible to generate an error page from regular handler methods instead of ``write_error`` by calling `~.RequestHandler.set_status`, writing a response, and returning. The special exception `tornado.web.Finish` may be raised to terminate the handler without calling ``write_error`` in situations where simply returning is not convenient. For 404 errors, use the ``default_handler_class`` `Application setting <.Application.settings>`. This handler should override `~.RequestHandler.prepare` instead of a more specific method like ``get()`` so it works with any HTTP method. It should produce its error page as described above: either by raising a ``HTTPError(404)`` and overriding ``write_error``, or calling ``self.set_status(404)`` and producing the response directly in ``prepare()``. Redirection ~~~~~~~~~~~ There are two main ways you can redirect requests in Tornado: `.RequestHandler.redirect` and with the `.RedirectHandler`. You can use ``self.redirect()`` within a `.RequestHandler` method to redirect users elsewhere. There is also an optional parameter ``permanent`` which you can use to indicate that the redirection is considered permanent. The default value of ``permanent`` is ``False``, which generates a ``302 Found`` HTTP response code and is appropriate for things like redirecting users after successful ``POST`` requests. If ``permanent`` is true, the ``301 Moved Permanently`` HTTP response code is used, which is useful for e.g. redirecting to a canonical URL for a page in an SEO-friendly manner. `.RedirectHandler` lets you configure redirects directly in your `.Application` routing table. For example, to configure a single static redirect:: app = tornado.web.Application([ url(r"/app", tornado.web.RedirectHandler, dict(url="http://itunes.apple.com/my-app-id")), ]) `.RedirectHandler` also supports regular expression substitutions. The following rule redirects all requests beginning with ``/pictures/`` to the prefix ``/photos/`` instead:: app = tornado.web.Application([ url(r"/photos/(.*)", MyPhotoHandler), url(r"/pictures/(.*)", tornado.web.RedirectHandler, dict(url=r"/photos/{0}")), ]) Unlike `.RequestHandler.redirect`, `.RedirectHandler` uses permanent redirects by default. 
This is because the routing table does not change at runtime and is presumed to be permanent, while redirects found in handlers are likely to be the result of other logic that may change. To send a temporary redirect with a `.RedirectHandler`, add ``permanent=False`` to the `.RedirectHandler` initialization arguments. Asynchronous handlers ~~~~~~~~~~~~~~~~~~~~~ Tornado handlers are synchronous by default: when the ``get()``/``post()`` method returns, the request is considered finished and the response is sent. Since all other requests are blocked while one handler is running, any long-running handler should be made asynchronous so it can call its slow operations in a non-blocking way. This topic is covered in more detail in :doc:`async`; this section is about the particulars of asynchronous techniques in `.RequestHandler` subclasses. The simplest way to make a handler asynchronous is to use the `.coroutine` decorator. This allows you to perform non-blocking I/O with the ``yield`` keyword, and no response will be sent until the coroutine has returned. See :doc:`coroutines` for more details. In some cases, coroutines may be less convenient than a callback-oriented style, in which case the `.tornado.web.asynchronous` decorator can be used instead. When this decorator is used the response is not automatically sent; instead the request will be kept open until some callback calls `.RequestHandler.finish`. It is up to the application to ensure that this method is called, or else the user's browser will simply hang. Here is an example that makes a call to the FriendFeed API using Tornado's built-in `.AsyncHTTPClient`: .. testcode:: class MainHandler(tornado.web.RequestHandler): @tornado.web.asynchronous def get(self): http = tornado.httpclient.AsyncHTTPClient() http.fetch("http://friendfeed-api.com/v2/feed/bret", callback=self.on_response) def on_response(self, response): if response.error: raise tornado.web.HTTPError(500) json = tornado.escape.json_decode(response.body) self.write("Fetched " + str(len(json["entries"])) + " entries " "from the FriendFeed API") self.finish() .. testoutput:: :hide: When ``get()`` returns, the request has not finished. When the HTTP client eventually calls ``on_response()``, the request is still open, and the response is finally flushed to the client with the call to ``self.finish()``. For comparison, here is the same example using a coroutine: .. testcode:: class MainHandler(tornado.web.RequestHandler): @tornado.gen.coroutine def get(self): http = tornado.httpclient.AsyncHTTPClient() response = yield http.fetch("http://friendfeed-api.com/v2/feed/bret") json = tornado.escape.json_decode(response.body) self.write("Fetched " + str(len(json["entries"])) + " entries " "from the FriendFeed API") .. testoutput:: :hide: For a more advanced asynchronous example, take a look at the `chat example application `_, which implements an AJAX chat room using `long polling `_. Users of long polling may want to override ``on_connection_close()`` to clean up after the client closes the connection (but see that method's docstring for caveats). tornado-4.5.3/docs/guide/templates.rst000066400000000000000000000303731322420601000177420ustar00rootroot00000000000000Templates and UI ================ .. testsetup:: import tornado.web Tornado includes a simple, fast, and flexible templating language. This section describes that language as well as related issues such as internationalization. 
Tornado can also be used with any other Python template language, although there is no provision for integrating these systems into `.RequestHandler.render`. Simply render the template to a string and pass it to `.RequestHandler.write`.

Configuring templates
~~~~~~~~~~~~~~~~~~~~~

By default, Tornado looks for template files in the same directory as the ``.py`` files that refer to them. To put your template files in a different directory, use the ``template_path`` `Application setting <.Application.settings>` (or override `.RequestHandler.get_template_path` if you have different template paths for different handlers).

To load templates from a non-filesystem location, subclass `tornado.template.BaseLoader` and pass an instance as the ``template_loader`` application setting.

Compiled templates are cached by default; to turn off this caching and reload templates so changes to the underlying files are always visible, use the application settings ``compiled_template_cache=False`` or ``debug=True``.

Template syntax
~~~~~~~~~~~~~~~

A Tornado template is just HTML (or any other text-based format) with Python control sequences and expressions embedded within the markup::

    <html>
       <head>
          <title>{{ title }}</title>
       </head>
       <body>
         <ul>
           {% for item in items %}
             <li>{{ escape(item) }}</li>
           {% end %}
         </ul>
       </body>
     </html>
If you saved this template as "template.html" and put it in the same directory as your Python file, you could render this template with: .. testcode:: class MainHandler(tornado.web.RequestHandler): def get(self): items = ["Item 1", "Item 2", "Item 3"] self.render("template.html", title="My title", items=items) .. testoutput:: :hide: Tornado templates support *control statements* and *expressions*. Control statements are surrounded by ``{%`` and ``%}``, e.g., ``{% if len(items) > 2 %}``. Expressions are surrounded by ``{{`` and ``}}``, e.g., ``{{ items[0] }}``. Control statements more or less map exactly to Python statements. We support ``if``, ``for``, ``while``, and ``try``, all of which are terminated with ``{% end %}``. We also support *template inheritance* using the ``extends`` and ``block`` statements, which are described in detail in the documentation for the `tornado.template`. Expressions can be any Python expression, including function calls. Template code is executed in a namespace that includes the following objects and functions (Note that this list applies to templates rendered using `.RequestHandler.render` and `~.RequestHandler.render_string`. If you're using the `tornado.template` module directly outside of a `.RequestHandler` many of these entries are not present). - ``escape``: alias for `tornado.escape.xhtml_escape` - ``xhtml_escape``: alias for `tornado.escape.xhtml_escape` - ``url_escape``: alias for `tornado.escape.url_escape` - ``json_encode``: alias for `tornado.escape.json_encode` - ``squeeze``: alias for `tornado.escape.squeeze` - ``linkify``: alias for `tornado.escape.linkify` - ``datetime``: the Python `datetime` module - ``handler``: the current `.RequestHandler` object - ``request``: alias for `handler.request <.HTTPServerRequest>` - ``current_user``: alias for `handler.current_user <.RequestHandler.current_user>` - ``locale``: alias for `handler.locale <.Locale>` - ``_``: alias for `handler.locale.translate <.Locale.translate>` - ``static_url``: alias for `handler.static_url <.RequestHandler.static_url>` - ``xsrf_form_html``: alias for `handler.xsrf_form_html <.RequestHandler.xsrf_form_html>` - ``reverse_url``: alias for `.Application.reverse_url` - All entries from the ``ui_methods`` and ``ui_modules`` ``Application`` settings - Any keyword arguments passed to `~.RequestHandler.render` or `~.RequestHandler.render_string` When you are building a real application, you are going to want to use all of the features of Tornado templates, especially template inheritance. Read all about those features in the `tornado.template` section (some features, including ``UIModules`` are implemented in the `tornado.web` module) Under the hood, Tornado templates are translated directly to Python. The expressions you include in your template are copied verbatim into a Python function representing your template. We don't try to prevent anything in the template language; we created it explicitly to provide the flexibility that other, stricter templating systems prevent. Consequently, if you write random stuff inside of your template expressions, you will get random Python errors when you execute the template. All template output is escaped by default, using the `tornado.escape.xhtml_escape` function. This behavior can be changed globally by passing ``autoescape=None`` to the `.Application` or `.tornado.template.Loader` constructors, for a template file with the ``{% autoescape None %}`` directive, or for a single expression by replacing ``{{ ... }}`` with ``{% raw ...%}``. 
All template output is escaped by default, using the
`tornado.escape.xhtml_escape` function.  This behavior can be changed
globally by passing ``autoescape=None`` to the `.Application` or
`tornado.template.Loader` constructors, for a template file with the
``{% autoescape None %}`` directive, or for a single expression by
replacing ``{{ ... }}`` with ``{% raw ... %}``.  Additionally, in each
of these places the name of an alternative escaping function may be
used instead of ``None``.

Note that while Tornado's automatic escaping is helpful in avoiding
XSS vulnerabilities, it is not sufficient in all cases.  Expressions
that appear in certain locations, such as in JavaScript or CSS, may
need additional escaping.  Additionally, either care must be taken to
always use double quotes and `.xhtml_escape` in HTML attributes that
may contain untrusted content, or a separate escaping function must be
used for attributes (see e.g. http://wonko.com/post/html-escaping).

Internationalization
~~~~~~~~~~~~~~~~~~~~

The locale of the current user (whether they are logged in or not) is
always available as ``self.locale`` in the request handler and as
``locale`` in templates.  The name of the locale (e.g., ``en_US``) is
available as ``locale.name``, and you can translate strings with the
`.Locale.translate` method.  Templates also have the global function
call ``_()`` available for string translation.  The translate function
has two forms::

    _("Translate this string")

which translates the string directly based on the current locale, and::

    _("A person liked this", "%(num)d people liked this", len(people)) % {"num": len(people)}

which translates a string that can be singular or plural based on the
value of the third argument.  In the example above, a translation of
the first string will be returned if ``len(people)`` is ``1``, or a
translation of the second string will be returned otherwise.

The most common pattern for translations is to use Python named
placeholders for variables (the ``%(num)d`` in the example above) since
placeholders can move around on translation.

Here is a properly internationalized template::

    <html>
       <head>
          <title>FriendFeed - {{ _("Sign in") }}</title>
       </head>
       <body>
         <form action="{{ request.path }}" method="post">
           <div>{{ _("Username") }} <input type="text" name="username"/></div>
           <div>{{ _("Password") }} <input type="password" name="password"/></div>
           <div><input type="submit" value="{{ _("Sign in") }}"/></div>
           {% module xsrf_form_html() %}
         </form>
       </body>
     </html>
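The same two-argument form is available from Python code via
`.Locale.translate`.  A handler like the following (hypothetical, shown
only for illustration) would produce the same message as the plural
example above::

    class LikeHandler(tornado.web.RequestHandler):
        def get(self):
            people = self.get_arguments("person")
            # translate() picks the singular or plural form based on the
            # count argument; the named placeholder fills in the value.
            message = self.locale.translate(
                "A person liked this", "%(num)d people liked this",
                len(people)) % {"num": len(people)}
            self.write(message)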
By default, we detect the user's locale using the ``Accept-Language``
header sent by the user's browser.  We choose ``en_US`` if we can't
find an appropriate ``Accept-Language`` value.  If you let users set
their locale as a preference, you can override this default locale
selection by overriding `.RequestHandler.get_user_locale`:

.. testcode::

    class BaseHandler(tornado.web.RequestHandler):
        def get_current_user(self):
            user_id = self.get_secure_cookie("user")
            if not user_id:
                return None
            return self.backend.get_user_by_id(user_id)

        def get_user_locale(self):
            if "locale" not in self.current_user.prefs:
                # Use the Accept-Language header
                return None
            return self.current_user.prefs["locale"]

.. testoutput::
   :hide:

If ``get_user_locale`` returns ``None``, we fall back on the
``Accept-Language`` header.

The `tornado.locale` module supports loading translations in two
formats: the ``.mo`` format used by `gettext` and related tools, and a
simple ``.csv`` format.  An application will generally call either
`tornado.locale.load_translations` or
`tornado.locale.load_gettext_translations` once at startup; see those
methods for more details on the supported formats.

You can get the list of supported locales in your application with
`tornado.locale.get_supported_locales()`.  The user's locale is chosen
to be the closest match based on the supported locales.  For example,
if the user's locale is ``es_GT``, and the ``es`` locale is supported,
``self.locale`` will be ``es`` for that request.  We fall back on
``en_US`` if no close match can be found.
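For example, an application using the ``.csv`` format might load its
translations at startup like this (a minimal sketch; the
``translations`` directory and its contents are hypothetical)::

    import tornado.locale

    # Assumes a directory with one CSV file per locale, e.g.
    # translations/es.csv containing rows such as:
    #   "A person liked this","A una persona le gustó esto"
    tornado.locale.load_translations("translations")
    print(tornado.locale.get_supported_locales())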
.. _ui-modules:

UI modules
~~~~~~~~~~

Tornado supports *UI modules* to make it easy to support standard,
reusable UI widgets across your application.  UI modules are like
special function calls to render components of your page, and they can
come packaged with their own CSS and JavaScript.

For example, if you are implementing a blog, and you want to have blog
entries appear on both the blog home page and on each blog entry page,
you can make an ``Entry`` module to render them on both pages.  First,
create a Python module for your UI modules, e.g., ``uimodules.py``::

    class Entry(tornado.web.UIModule):
        def render(self, entry, show_comments=False):
            return self.render_string(
                "module-entry.html", entry=entry, show_comments=show_comments)

Tell Tornado to use ``uimodules.py`` using the ``ui_modules`` setting in
your application::

    from . import uimodules

    class HomeHandler(tornado.web.RequestHandler):
        def get(self):
            entries = self.db.query("SELECT * FROM entries ORDER BY date DESC")
            self.render("home.html", entries=entries)

    class EntryHandler(tornado.web.RequestHandler):
        def get(self, entry_id):
            entry = self.db.get("SELECT * FROM entries WHERE id = %s", entry_id)
            if not entry:
                raise tornado.web.HTTPError(404)
            self.render("entry.html", entry=entry)

    settings = {
        "ui_modules": uimodules,
    }
    application = tornado.web.Application([
        (r"/", HomeHandler),
        (r"/entry/([0-9]+)", EntryHandler),
    ], **settings)

Within a template, you can call a module with the ``{% module %}``
statement.  For example, you could call the ``Entry`` module from both
``home.html``::

    {% for entry in entries %}
      {% module Entry(entry) %}
    {% end %}

and ``entry.html``::

    {% module Entry(entry, show_comments=True) %}

Modules can include custom CSS and JavaScript functions by overriding
the ``embedded_css``, ``embedded_javascript``, ``javascript_files``, or
``css_files`` methods::

    class Entry(tornado.web.UIModule):
        def embedded_css(self):
            return ".entry { margin-bottom: 1em; }"

        def render(self, entry, show_comments=False):
            return self.render_string(
                "module-entry.html", entry=entry, show_comments=show_comments)

Module CSS and JavaScript will be included once no matter how many times
a module is used on a page.  CSS is always included in the ``<head>`` of
the page, and JavaScript is always included just before the ``</body>``
tag at the end of the page.

When additional Python code is not required, a template file itself may
be used as a module.  For example, the preceding example could be
rewritten to put the following in ``module-entry.html``::

    {{ set_resources(embedded_css=".entry { margin-bottom: 1em; }") }}
    <!-- more template html... -->

This revised template module would be invoked with::

    {% module Template("module-entry.html", show_comments=True) %}

The ``set_resources`` function is only available in templates invoked
via ``{% module Template(...) %}``.  Unlike the ``{% include ... %}``
directive, template modules have a distinct namespace from their
containing template - they can only see the global template namespace
and their own keyword arguments.
tornado-4.5.3/docs/http.rst000066400000000000000000000001741322420601000156220ustar00rootroot00000000000000HTTP servers and clients
========================

.. toctree::

   httpserver
   httpclient
   httputil
   http1connection
tornado-4.5.3/docs/http1connection.rst000066400000000000000000000003001322420601000177520ustar00rootroot00000000000000``tornado.http1connection`` -- HTTP/1.x client/server implementation
====================================================================

.. automodule:: tornado.http1connection
    :members:
tornado-4.5.3/docs/httpclient.rst000066400000000000000000000027461322420601000170260ustar00rootroot00000000000000``tornado.httpclient`` --- Asynchronous HTTP client
===================================================

.. automodule:: tornado.httpclient

   HTTP client interfaces
   ----------------------

   .. autoclass:: HTTPClient
      :members:

   .. autoclass:: AsyncHTTPClient
      :members:

   Request objects
   ---------------

   .. autoclass:: HTTPRequest
      :members:

   Response objects
   ----------------

   .. autoclass:: HTTPResponse
      :members:

   Exceptions
   ----------

   .. autoexception:: HTTPError
      :members:

   Command-line interface
   ----------------------

   This module provides a simple command-line interface to fetch a url
   using Tornado's HTTP client.  Example usage::

      # Fetch the url and print its body
      python -m tornado.httpclient http://www.google.com

      # Just print the headers
      python -m tornado.httpclient --print_headers --print_body=false http://www.google.com

Implementations
~~~~~~~~~~~~~~~

.. automodule:: tornado.simple_httpclient
    :members:

.. module:: tornado.curl_httpclient

.. class:: CurlAsyncHTTPClient(io_loop, max_clients=10, defaults=None)

   ``libcurl``-based HTTP client.

Example Code
~~~~~~~~~~~~

* `A simple webspider `_ shows how to fetch URLs concurrently.
* `The file uploader demo `_ uses either HTTP POST or HTTP PUT to
  upload files to a server.
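For a quick start, the blocking interface can also be exercised
directly; the following is a minimal sketch (the URL is just an
example)::

    from tornado.httpclient import HTTPClient, HTTPError

    http_client = HTTPClient()
    try:
        # fetch() blocks until the response arrives (or an error occurs).
        response = http_client.fetch("http://www.google.com/")
        print(response.body)
    except HTTPError as e:
        print("Error:", e)
    finally:
        http_client.close()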
tornado-4.5.3/docs/httpserver.rst000066400000000000000000000003301322420601000170450ustar00rootroot00000000000000``tornado.httpserver`` --- Non-blocking HTTP server
===================================================

.. automodule:: tornado.httpserver

   HTTP Server
   -----------

   .. autoclass:: HTTPServer
      :members:
tornado-4.5.3/docs/httputil.rst000066400000000000000000000003261322420601000165170ustar00rootroot00000000000000``tornado.httputil`` --- Manipulate HTTP headers and URLs
=========================================================

.. testsetup::

   from tornado.httputil import *

.. automodule:: tornado.httputil
   :members:
tornado-4.5.3/docs/index.rst000066400000000000000000000122761322420601000157560ustar00rootroot00000000000000.. title:: Tornado Web Server

.. meta::
    :google-site-verification: g4bVhgwbVO1d9apCUsT-eKlApg31Cygbp8VGZY8Rf0g

|Tornado Web Server|
====================

.. |Tornado Web Server| image:: tornado.png
    :alt: Tornado Web Server

`Tornado `_ is a Python web framework and
asynchronous networking library, originally developed at `FriendFeed
`_.  By using non-blocking network I/O, Tornado
can scale to tens of thousands of open connections, making it ideal for
`long polling `_, `WebSockets `_, and other
applications that require a long-lived connection to each user.

Quick links
-----------

* Current version: |version| (`download from PyPI `_, :doc:`release notes `)
* `Source (github) `_
* Mailing lists: `discussion `_ and `announcements `_
* `Stack Overflow `_
* `Wiki `_

Hello, world
------------

Here is a simple "Hello, world" example web app for Tornado::

    import tornado.ioloop
    import tornado.web

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello, world")

    def make_app():
        return tornado.web.Application([
            (r"/", MainHandler),
        ])

    if __name__ == "__main__":
        app = make_app()
        app.listen(8888)
        tornado.ioloop.IOLoop.current().start()

This example does not use any of Tornado's asynchronous features; for
that see this `simple chat room `_.

Installation
------------

::

    pip install tornado

Tornado is listed in `PyPI `_ and can be installed
with ``pip``.  Note that the source distribution includes demo
applications that are not present when Tornado is installed in this
way, so you may wish to download a copy of the source tarball or clone
the `git repository `_ as well.

**Prerequisites**: Tornado runs on Python 2.7 and 3.3+.  For Python 2,
version 2.7.9 or newer is *strongly* recommended for the improved SSL
support.  In addition to the requirements which will be installed
automatically by ``pip`` or ``setup.py install``, the following
optional packages may be useful:

* `concurrent.futures `_ is the recommended thread pool for use with
  Tornado and enables the use of `~tornado.netutil.ThreadedResolver`.
  It is needed only on Python 2; Python 3 includes this package in the
  standard library.
* `pycurl `_ is used by the optional
  ``tornado.curl_httpclient``.  Libcurl version 7.22 or higher is
  required.
* `Twisted `_ may be used with the classes in
  `tornado.platform.twisted`.
* `pycares `_ is an alternative non-blocking DNS resolver that can
  be used when threads are not appropriate.
* `monotonic `_ or `Monotime `_ add support for a monotonic clock,
  which improves reliability in environments where clock adjustments
  are frequent.  No longer needed in Python 3.3.
**Platforms**: Tornado should run on any Unix-like platform, although
for the best performance and scalability only Linux (with ``epoll``)
and BSD (with ``kqueue``) are recommended for production deployment
(even though Mac OS X is derived from BSD and supports kqueue, its
networking performance is generally poor so it is recommended only for
development use).  Tornado will also run on Windows, although this
configuration is not officially supported and is recommended only for
development use.  Without reworking the Tornado IOLoop interface, it's
not possible to add a native Tornado Windows IOLoop implementation or
leverage Windows' IOCP support from frameworks like AsyncIO or Twisted.

Documentation
-------------

This documentation is also available in `PDF and Epub formats `_.

.. toctree::
   :titlesonly:

   guide
   webframework
   http
   networking
   coroutine
   integration
   utilities
   faq
   releases

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

Discussion and support
----------------------

You can discuss Tornado on `the Tornado developer mailing list `_, and
report bugs on the `GitHub issue tracker `_.  Links to additional
resources can be found on the `Tornado wiki `_.  New releases are
announced on the `announcements mailing list `_.

Tornado is available under the `Apache License, Version 2.0 `_.

This web site and all documentation are licensed under `Creative
Commons 3.0 `_.
tornado-4.5.3/docs/integration.rst000066400000000000000000000002061322420601000171620ustar00rootroot00000000000000Integration with other services
===============================

.. toctree::

    auth
    wsgi
    asyncio
    caresresolver
    twisted
tornado-4.5.3/docs/ioloop.rst000066400000000000000000000030741322420601000161460ustar00rootroot00000000000000``tornado.ioloop`` --- Main event loop
======================================

.. automodule:: tornado.ioloop

   IOLoop objects
   --------------

   .. autoclass:: IOLoop

   Running an IOLoop
   ^^^^^^^^^^^^^^^^^

   .. automethod:: IOLoop.current
   .. automethod:: IOLoop.make_current
   .. automethod:: IOLoop.instance
   .. automethod:: IOLoop.initialized
   .. automethod:: IOLoop.install
   .. automethod:: IOLoop.clear_instance
   .. automethod:: IOLoop.start
   .. automethod:: IOLoop.stop
   .. automethod:: IOLoop.run_sync
   .. automethod:: IOLoop.close

   I/O events
   ^^^^^^^^^^

   .. automethod:: IOLoop.add_handler
   .. automethod:: IOLoop.update_handler
   .. automethod:: IOLoop.remove_handler

   Callbacks and timeouts
   ^^^^^^^^^^^^^^^^^^^^^^

   .. automethod:: IOLoop.add_callback
   .. automethod:: IOLoop.add_callback_from_signal
   .. automethod:: IOLoop.add_future
   .. automethod:: IOLoop.add_timeout
   .. automethod:: IOLoop.call_at
   .. automethod:: IOLoop.call_later
   .. automethod:: IOLoop.remove_timeout
   .. automethod:: IOLoop.spawn_callback
   .. automethod:: IOLoop.time
   .. autoclass:: PeriodicCallback
      :members:

   Debugging and error handling
   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

   .. automethod:: IOLoop.handle_callback_exception
   .. automethod:: IOLoop.set_blocking_signal_threshold
   .. automethod:: IOLoop.set_blocking_log_threshold
   .. automethod:: IOLoop.log_stack

   Methods for subclasses
   ^^^^^^^^^^^^^^^^^^^^^^

   .. automethod:: IOLoop.initialize
   .. automethod:: IOLoop.close_fd
   .. automethod:: IOLoop.split_fd
tornado-4.5.3/docs/iostream.rst000066400000000000000000000025101322420601000164620ustar00rootroot00000000000000``tornado.iostream`` --- Convenient wrappers for non-blocking sockets
=====================================================================

.. automodule:: tornado.iostream

   Base class
   ----------

   .. autoclass:: BaseIOStream
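   For example, a very small client might look like this (a minimal
   sketch using the callback style that predates coroutines; the host
   and request line are only illustrative)::

       import socket

       from tornado import ioloop, iostream

       def main():
           sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
           stream = iostream.IOStream(sock)

           def on_connect():
               # Send a minimal HTTP/1.0 request, then read everything
               # the server sends until it closes the connection.
               stream.write(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
               stream.read_until_close(on_response)

           def on_response(data):
               print(data)
               ioloop.IOLoop.current().stop()

           stream.connect(("example.com", 80), on_connect)
           ioloop.IOLoop.current().start()

       main()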
   Main interface
   ^^^^^^^^^^^^^^

   .. automethod:: BaseIOStream.write
   .. automethod:: BaseIOStream.read_bytes
   .. automethod:: BaseIOStream.read_until
   .. automethod:: BaseIOStream.read_until_regex
   .. automethod:: BaseIOStream.read_until_close
   .. automethod:: BaseIOStream.close
   .. automethod:: BaseIOStream.set_close_callback
   .. automethod:: BaseIOStream.closed
   .. automethod:: BaseIOStream.reading
   .. automethod:: BaseIOStream.writing
   .. automethod:: BaseIOStream.set_nodelay

   Methods for subclasses
   ^^^^^^^^^^^^^^^^^^^^^^

   .. automethod:: BaseIOStream.fileno
   .. automethod:: BaseIOStream.close_fd
   .. automethod:: BaseIOStream.write_to_fd
   .. automethod:: BaseIOStream.read_from_fd
   .. automethod:: BaseIOStream.get_fd_error

   Implementations
   ---------------

   .. autoclass:: IOStream
      :members:

   .. autoclass:: SSLIOStream
      :members:

   .. autoclass:: PipeIOStream
      :members:

   Exceptions
   ----------

   .. autoexception:: StreamBufferFullError
   .. autoexception:: StreamClosedError
   .. autoexception:: UnsatisfiableReadError
tornado-4.5.3/docs/locale.rst000066400000000000000000000002251322420601000160770ustar00rootroot00000000000000``tornado.locale`` --- Internationalization support
===================================================

.. automodule:: tornado.locale
    :members:
tornado-4.5.3/docs/locks.rst000066400000000000000000000016401322420601000157550ustar00rootroot00000000000000``tornado.locks`` -- Synchronization primitives
===============================================

.. versionadded:: 4.2

Coordinate coroutines with synchronization primitives analogous to
those the standard library provides to threads.

.. warning::

   Note that these primitives are not actually thread-safe and cannot be used
   in place of those from the standard library--they are meant to coordinate
   Tornado coroutines in a single-threaded app, not to protect shared objects
   in a multithreaded app.

.. automodule:: tornado.locks

   Condition
   ---------
   .. autoclass:: Condition
    :members:

   Event
   -----
   .. autoclass:: Event
    :members:

   Semaphore
   ---------
   .. autoclass:: Semaphore
    :members:

   BoundedSemaphore
   ----------------
   .. autoclass:: BoundedSemaphore
    :members:
    :inherited-members:

   Lock
   ----
   .. autoclass:: Lock
    :members:
    :inherited-members:
tornado-4.5.3/docs/log.rst000066400000000000000000000001621322420601000154210ustar00rootroot00000000000000``tornado.log`` --- Logging support
===================================

.. automodule:: tornado.log
    :members:
tornado-4.5.3/docs/netutil.rst000066400000000000000000000002361322420601000163260ustar00rootroot00000000000000``tornado.netutil`` --- Miscellaneous network utilities
=======================================================

.. automodule:: tornado.netutil
    :members:
tornado-4.5.3/docs/networking.rst000066400000000000000000000001721322420601000170300ustar00rootroot00000000000000Asynchronous networking
=======================

.. toctree::

    ioloop
    iostream
    netutil
    tcpclient
    tcpserver
tornado-4.5.3/docs/options.rst000066400000000000000000000011501322420601000163310ustar00rootroot00000000000000``tornado.options`` --- Command-line parsing
============================================

.. automodule:: tornado.options

   Global functions
   ----------------

   .. autofunction:: define

   .. py:data:: options

       Global options object.  All defined options are available as
       attributes on this object.

   .. autofunction:: parse_command_line
   .. autofunction:: parse_config_file
   .. autofunction:: print_help(file=sys.stderr)
   .. autofunction:: add_parse_callback
   .. autoexception:: Error
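   For example, a script might define and read an option like this (a
   minimal sketch; the ``port`` option is only illustrative)::

       from tornado.options import define, options, parse_command_line

       define("port", default=8888, type=int, help="port to listen on")

       # Parses sys.argv; by default this also configures basic logging.
       parse_command_line()
       print(options.port)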
   OptionParser class
   ------------------

   .. autoclass:: OptionParser
      :members:
tornado-4.5.3/docs/process.rst000066400000000000000000000003731322420601000163220ustar00rootroot00000000000000``tornado.process`` --- Utilities for multiple processes
=========================================================

.. automodule:: tornado.process
    :members:

.. exception:: CalledProcessError

    An alias for `subprocess.CalledProcessError`.
tornado-4.5.3/docs/queues.rst000066400000000000000000000010131322420601000161430ustar00rootroot00000000000000``tornado.queues`` -- Queues for coroutines
===========================================

.. versionadded:: 4.2

.. automodule:: tornado.queues

   Classes
   -------

   Queue
   ^^^^^
   .. autoclass:: Queue
    :members:

   PriorityQueue
   ^^^^^^^^^^^^^
   .. autoclass:: PriorityQueue
    :members:

   LifoQueue
   ^^^^^^^^^
   .. autoclass:: LifoQueue
    :members:

   Exceptions
   ----------

   QueueEmpty
   ^^^^^^^^^^
   .. autoexception:: QueueEmpty

   QueueFull
   ^^^^^^^^^
   .. autoexception:: QueueFull
tornado-4.5.3/docs/releases.rst000066400000000000000000000013721322420601000164470ustar00rootroot00000000000000Release notes
=============

.. toctree::
   :maxdepth: 2

   releases/v4.5.3
   releases/v4.5.2
   releases/v4.5.1
   releases/v4.5.0
   releases/v4.4.3
   releases/v4.4.2
   releases/v4.4.1
   releases/v4.4.0
   releases/v4.3.0
   releases/v4.2.1
   releases/v4.2.0
   releases/v4.1.0
   releases/v4.0.2
   releases/v4.0.1
   releases/v4.0.0
   releases/v3.2.2
   releases/v3.2.1
   releases/v3.2.0
   releases/v3.1.1
   releases/v3.1.0
   releases/v3.0.2
   releases/v3.0.1
   releases/v3.0.0
   releases/v2.4.1
   releases/v2.4.0
   releases/v2.3.0
   releases/v2.2.1
   releases/v2.2.0
   releases/v2.1.1
   releases/v2.1.0
   releases/v2.0.0
   releases/v1.2.1
   releases/v1.2.0
   releases/v1.1.1
   releases/v1.1.0
   releases/v1.0.1
   releases/v1.0.0
tornado-4.5.3/docs/releases/000077500000000000000000000000001322420601000157125ustar00rootroot00000000000000tornado-4.5.3/docs/releases/v1.0.0.rst000066400000000000000000000051531322420601000172720ustar00rootroot00000000000000What's new in Tornado 1.0
=========================

July 22, 2010
-------------

::

  We are pleased to announce the release of Tornado 1.0, available
  from https://github.com/downloads/facebook/tornado/tornado-1.0.tar.gz.
  There have been many changes since version 0.2; here are some of
  the highlights:

  New features:
  * Improved support for running other WSGI applications in a Tornado
    server (tested with Django and CherryPy)
  * Improved performance on Mac OS X and BSD (kqueue-based IOLoop),
    and experimental support for win32
  * Rewritten AsyncHTTPClient available as
    tornado.httpclient.AsyncHTTPClient2 (this will become the default
    in a future release)
  * Support for standard .mo files in addition to .csv in the locale
    module
  * Pre-forking support for running multiple Tornado processes at
    once (see HTTPServer.start())
  * SSL and gzip support in HTTPServer
  * reverse_url() function refers to urls from the Application config
    by name from templates and RequestHandlers
  * RequestHandler.on_connection_close() callback is called when the
    client has closed the connection (subject to limitations of the
    underlying network stack, any proxies, etc)
  * Static files can now be served somewhere other than /static/ via
    the static_url_prefix application setting
  * URL regexes can now use named groups ("(?P<name>)") to pass
    arguments to get()/post() via keyword instead of position
  * HTTP header dictionary-like objects now support multiple values
    for the same header via the get_all() and add() methods.
* Several new options in the httpclient module, including prepare_curl_callback and header_callback * Improved logging configuration in tornado.options. * UIModule.html_body() can be used to return html to be inserted at the end of the document body. Backwards-incompatible changes: * RequestHandler.get_error_html() now receives the exception object as a keyword argument if the error was caused by an uncaught exception. * Secure cookies are now more secure, but incompatible with cookies set by Tornado 0.2. To read cookies set by older versions of Tornado, pass include_name=False to RequestHandler.get_secure_cookie() * Parameters passed to RequestHandler.get/post() by extraction from the path now have %-escapes decoded, for consistency with the processing that was already done with other query parameters. Many thanks to everyone who contributed patches, bug reports, and feedback that went into this release! -Ben tornado-4.5.3/docs/releases/v1.0.1.rst000066400000000000000000000004061322420601000172670ustar00rootroot00000000000000What's new in Tornado 1.0.1 =========================== Aug 13, 2010 ------------ :: This release fixes a bug with RequestHandler.get_secure_cookie, which would in some circumstances allow an attacker to tamper with data stored in the cookie. tornado-4.5.3/docs/releases/v1.1.0.rst000066400000000000000000000052051322420601000172710ustar00rootroot00000000000000What's new in Tornado 1.1 ========================= Sep 7, 2010 ----------- :: We are pleased to announce the release of Tornado 1.1, available from https://github.com/downloads/facebook/tornado/tornado-1.1.tar.gz Changes in this release: * RequestHandler.async_callback and related functions in other classes are no longer needed in most cases (although it's harmless to continue using them). Uncaught exceptions will now cause the request to be closed even in a callback. If you're curious how this works, see the new tornado.stack_context module. * The new tornado.testing module contains support for unit testing asynchronous IOLoop-based code. * AsyncHTTPClient has been rewritten (the new implementation was available as AsyncHTTPClient2 in Tornado 1.0; both names are supported for backwards compatibility). * The tornado.auth module has had a number of updates, including support for OAuth 2.0 and the Facebook Graph API, and upgrading Twitter and Google support to OAuth 1.0a. * The websocket module is back and supports the latest version (76) of the websocket protocol. Note that this module's interface is different from the websocket module that appeared in pre-1.0 versions of Tornado. * New method RequestHandler.initialize() can be overridden in subclasses to simplify handling arguments from URLSpecs. The sequence of methods called during initialization is documented at http://tornadoweb.org/documentation#overriding-requesthandler-methods * get_argument() and related methods now work on PUT requests in addition to POST. * The httpclient module now supports HTTP proxies. * When HTTPServer is run in SSL mode, the SSL handshake is now non-blocking. * Many smaller bug fixes and documentation updates Backwards-compatibility notes: * While most users of Tornado should not have to deal with the stack_context module directly, users of worker thread pools and similar constructs may need to use stack_context.wrap and/or NullContext to avoid memory leaks. * The new AsyncHTTPClient still works with libcurl version 7.16.x, but it performs better when both libcurl and pycurl are at least version 7.18.2. 
* OAuth transactions started under previous versions of the auth
  module cannot be completed under the new module.  This applies
  only to the initial authorization process; once an authorized
  token is issued that token works with either version.

Many thanks to everyone who contributed patches, bug reports, and
feedback that went into this release!

-Ben
tornado-4.5.3/docs/releases/v1.1.1.rst000066400000000000000000000015511322420601000172720ustar00rootroot00000000000000What's new in Tornado 1.1.1
===========================

Feb 8, 2011
-----------

::

  Tornado 1.1.1 is a BACKWARDS-INCOMPATIBLE security update that fixes an
  XSRF vulnerability.  It is available at
  https://github.com/downloads/facebook/tornado/tornado-1.1.1.tar.gz

  This is a backwards-incompatible change.  Applications that previously
  relied on a blanket exception for XMLHTTPRequest may need to be modified
  to explicitly include the XSRF token when making ajax requests.

  The tornado chat demo application demonstrates one way of adding this
  token (specifically the function postJSON in demos/chat/static/chat.js).

  More information about this change and its justification can be found at
  http://www.djangoproject.com/weblog/2011/feb/08/security/
  http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
tornado-4.5.3/docs/releases/v1.2.0.rst000066400000000000000000000131571322420601000172770ustar00rootroot00000000000000What's new in Tornado 1.2
=========================

Feb 20, 2011
------------

::

  We are pleased to announce the release of Tornado 1.2, available from
  https://github.com/downloads/facebook/tornado/tornado-1.2.tar.gz

  Backwards compatibility notes:
  * This release includes the backwards-incompatible security change from
    version 1.1.1.  Users upgrading from 1.1 or earlier should read the
    release notes from that release:
    http://groups.google.com/group/python-tornado/browse_thread/thread/b36191c781580cde
  * StackContexts that do something other than catch exceptions may need to
    be modified to be reentrant.
    https://github.com/tornadoweb/tornado/commit/7a7e24143e77481d140fb5579bc67e4c45cbcfad
  * When XSRF tokens are used, the token must also be present on PUT and
    DELETE requests (anything but GET and HEAD)

  New features:
  * A new HTTP client implementation is available in the module
    tornado.simple_httpclient.  This HTTP client does not depend on pycurl.
    It has not yet been tested extensively in production, but is intended
    to eventually replace the pycurl-based HTTP client in a future release
    of Tornado.  To transparently replace tornado.httpclient.AsyncHTTPClient
    with this new implementation, you can set the environment variable
    USE_SIMPLE_HTTPCLIENT=1 (note that the next release of Tornado will
    likely include a different way to select HTTP client implementations)
  * Request logging is now done by the Application rather than the
    RequestHandler.  Logging behavior may be customized by either overriding
    Application.log_request in a subclass or by passing log_function as an
    Application setting
  * Application.listen(port): Convenience method as an alternative to
    explicitly creating an HTTPServer
  * tornado.escape.linkify(): Wrap urls in <a> tags
  * RequestHandler.create_signed_value(): Create signatures like the
    secure_cookie methods without setting cookies.
  * tornado.testing.get_unused_port(): Returns a port selected in the same
    way as in AsyncHTTPTestCase
  * AsyncHTTPTestCase.fetch(): Convenience method for synchronous fetches
  * IOLoop.set_blocking_signal_threshold(): Set a callback to be run when
    the IOLoop is blocked.
  * IOStream.connect(): Asynchronously connect a client socket
  * AsyncHTTPClient.handle_callback_exception(): May be overridden in
    subclass for custom error handling
  * httpclient.HTTPRequest has two new keyword arguments, validate_cert and
    ca_certs.  Setting validate_cert=False will disable all certificate
    checks when fetching https urls.  ca_certs may be set to a filename
    containing trusted certificate authorities (defaults will be used if
    this is unspecified)
  * HTTPRequest.get_ssl_certificate(): Returns the client's SSL certificate
    (if client certificates were requested in the server's ssl_options)
  * StaticFileHandler can be configured to return a default file
    (e.g. index.html) when a directory is requested
  * Template directives of the form "{% from x import y %}" are now
    supported (in addition to the existing support for "{% import x %}")
  * FacebookGraphMixin.get_authenticated_user now accepts a new parameter
    'extra_fields' which may be used to request additional information
    about the user

  Bug fixes:
  * auth: Fixed KeyError with Facebook offline_access
  * auth: Uses request.uri instead of request.path as the default redirect
    so that parameters are preserved.
  * escape: xhtml_escape() now returns a unicode string, not utf8-encoded
    bytes
  * ioloop: Callbacks added with add_callback are now run in the order they
    were added
  * ioloop: PeriodicCallback.stop can now be called from inside the
    callback.
  * iostream: Fixed several bugs in SSLIOStream
  * iostream: Detect when the other side has closed the connection even
    with the select()-based IOLoop
  * iostream: read_bytes(0) now works as expected
  * iostream: Fixed bug when writing large amounts of data on windows
  * iostream: Fixed infinite loop that could occur with unhandled
    exceptions
  * httpclient: Fix bugs when some requests use proxies and others don't
  * httpserver: HTTPRequest.protocol is now set correctly when using the
    built-in SSL support
  * httpserver: When using multiple processes, the standard library's
    random number generator is re-seeded in each child process
  * httpserver: With xheaders enabled, X-Forwarded-Proto is supported as an
    alternative to X-Scheme
  * httpserver: Fixed bugs in multipart/form-data parsing
  * locale: format_date() now behaves sanely with dates in the future
  * locale: Updates to the language list
  * stack_context: Fixed bug with contexts leaking through reused IOStreams
  * stack_context: Simplified semantics and improved performance
  * web: The order of css_files from UIModules is now preserved
  * web: Fixed error with default_host redirect
  * web: StaticFileHandler works when os.path.sep != '/' (i.e. on Windows)
  * web: Fixed a caching-related bug in StaticFileHandler when a file's
    timestamp has changed but its contents have not.
  * web: Fixed bugs with HEAD requests and e.g. Etag headers
  * web: Fix bugs when different handlers have different static_paths
  * web: @removeslash will no longer cause a redirect loop when applied to
    the root path
  * websocket: Now works over SSL
  * websocket: Improved compatibility with proxies

  Many thanks to everyone who contributed patches, bug reports, and
  feedback that went into this release!

  -Ben
tornado-4.5.3/docs/releases/v1.2.1.rst000066400000000000000000000013611322420601000172720ustar00rootroot00000000000000What's new in Tornado 1.2.1
===========================

Mar 3, 2011
-----------

::

  We are pleased to announce the release of Tornado 1.2.1, available from
  https://github.com/downloads/facebook/tornado/tornado-1.2.1.tar.gz

  This release contains only two small changes relative to version 1.2:
  * FacebookGraphMixin has been updated to work with a recent change to
    the Facebook API.
  * Running "setup.py install" will no longer attempt to automatically
    install pycurl.  This wasn't working well on platforms where the best
    way to install pycurl is via something like apt-get instead of
    easy_install.

  This is an important upgrade if you are using FacebookGraphMixin, but
  otherwise it can be safely ignored.
tornado-4.5.3/docs/releases/v2.0.0.rst000066400000000000000000000052161322420601000172730ustar00rootroot00000000000000What's new in Tornado 2.0
=========================

Jun 21, 2011
------------

::

  Major changes:
  * Template output is automatically escaped by default; see backwards
    compatibility note below.
  * The default AsyncHTTPClient implementation is now simple_httpclient.
  * Python 3.2 is now supported.

  Backwards compatibility:
  * Template autoescaping is enabled by default.  Applications upgrading
    from a previous release of Tornado must either disable autoescaping or
    adapt their templates to work with it.  For most applications, the
    simplest way to do this is to pass autoescape=None to the Application
    constructor.  Note that this affects certain built-in methods, e.g.
    xsrf_form_html and linkify, which must now be called with {% raw %}
    instead of {{ ... }}
  * Applications that wish to continue using curl_httpclient instead of
    simple_httpclient may do so by calling
    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    at the beginning of the process.  Users of Python 2.5 will probably
    want to use curl_httpclient as simple_httpclient only supports ssl on
    Python 2.6+.
  * Python 3 compatibility involved many changes throughout the codebase,
    so users are encouraged to test their applications more thoroughly
    than usual when upgrading to this release.

  Other changes in this release:
  * Templates support several new directives:
    - {% autoescape ...%} to control escaping behavior
    - {% raw ... %} for unescaped output
    - {% module ... %} for calling UIModules
  * {% module Template(path, **kwargs) %} may now be used to call another
    template with an independent namespace
  * All IOStream callbacks are now run directly on the IOLoop via
    add_callback.
  * HTTPServer now supports IPv6 where available.  To disable, pass
    family=socket.AF_INET to HTTPServer.bind().
  * HTTPClient now supports IPv6, configurable via allow_ipv6=bool on the
    HTTPRequest.  allow_ipv6 defaults to false on simple_httpclient and
    true on curl_httpclient.
  * RequestHandlers can use an encoding other than utf-8 for query
    parameters by overriding decode_argument()
  * Performance improvements, especially for applications that use a lot
    of IOLoop timeouts
  * HTTP OPTIONS method no longer requires an XSRF token.
  * JSON output (RequestHandler.write(dict)) now sets Content-Type to
    application/json
  * Etag computation can now be customized or disabled by overriding
    RequestHandler.compute_etag
  * USE_SIMPLE_HTTPCLIENT environment variable is no longer supported.
    Use AsyncHTTPClient.configure instead.
tornado-4.5.3/docs/releases/v2.1.0.rst000066400000000000000000000166301322420601000172760ustar00rootroot00000000000000What's new in Tornado 2.1 ========================= Sep 20, 2011 ------------ Backwards-incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Support for secure cookies written by pre-1.0 releases of Tornado has been removed. The `.RequestHandler.get_secure_cookie` method no longer takes an ``include_name`` parameter. * The ``debug`` application setting now causes stack traces to be displayed in the browser on uncaught exceptions. Since this may leak sensitive information, debug mode is not recommended for public-facing servers. Security fixes ~~~~~~~~~~~~~~ * Diginotar has been removed from the default CA certificates file used by ``SimpleAsyncHTTPClient``. New modules ~~~~~~~~~~~ * `tornado.gen`: A generator-based interface to simplify writing asynchronous functions. * `tornado.netutil`: Parts of `tornado.httpserver` have been extracted into a new module for use with non-HTTP protocols. * `tornado.platform.twisted`: A bridge between the Tornado IOLoop and the Twisted Reactor, allowing code written for Twisted to be run on Tornado. * `tornado.process`: Multi-process mode has been improved, and can now restart crashed child processes. A new entry point has been added at `tornado.process.fork_processes`, although ``tornado.httpserver.HTTPServer.start`` is still supported. ``tornado.web`` ~~~~~~~~~~~~~~~ * `tornado.web.RequestHandler.write_error` replaces ``get_error_html`` as the preferred way to generate custom error pages (``get_error_html`` is still supported, but deprecated) * In `tornado.web.Application`, handlers may be specified by (fully-qualified) name instead of importing and passing the class object itself. * It is now possible to use a custom subclass of ``StaticFileHandler`` with the ``static_handler_class`` application setting, and this subclass can override the behavior of the ``static_url`` method. * `~tornado.web.StaticFileHandler` subclasses can now override ``get_cache_time`` to customize cache control behavior. * `tornado.web.RequestHandler.get_secure_cookie` now has a ``max_age_days`` parameter to allow applications to override the default one-month expiration. * `~tornado.web.RequestHandler.set_cookie` now accepts a ``max_age`` keyword argument to set the ``max-age`` cookie attribute (note underscore vs dash) * `tornado.web.RequestHandler.set_default_headers` may be overridden to set headers in a way that does not get reset during error handling. * `.RequestHandler.add_header` can now be used to set a header that can appear multiple times in the response. * `.RequestHandler.flush` can now take a callback for flow control. * The ``application/json`` content type can now be gzipped. * The cookie-signing functions are now accessible as static functions ``tornado.web.create_signed_value`` and ``tornado.web.decode_signed_value``. ``tornado.httpserver`` ~~~~~~~~~~~~~~~~~~~~~~ * To facilitate some advanced multi-process scenarios, ``HTTPServer`` has a new method ``add_sockets``, and socket-opening code is available separately as `tornado.netutil.bind_sockets`. * The ``cookies`` property is now available on ``tornado.httpserver.HTTPRequest`` (it is also available in its old location as a property of `~tornado.web.RequestHandler`) * ``tornado.httpserver.HTTPServer.bind`` now takes a backlog argument with the same meaning as ``socket.listen``. * `~tornado.httpserver.HTTPServer` can now be run on a unix socket as well as TCP. 
* Fixed exception at startup when ``socket.AI_ADDRCONFIG`` is not available, as on Windows XP ``IOLoop`` and ``IOStream`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `~tornado.iostream.IOStream` performance has been improved, especially for small synchronous requests. * New methods ``tornado.iostream.IOStream.read_until_close`` and ``tornado.iostream.IOStream.read_until_regex``. * ``IOStream.read_bytes`` and ``IOStream.read_until_close`` now take a ``streaming_callback`` argument to return data as it is received rather than all at once. * `.IOLoop.add_timeout` now accepts `datetime.timedelta` objects in addition to absolute timestamps. * `~tornado.ioloop.PeriodicCallback` now sticks to the specified period instead of creeping later due to accumulated errors. * `tornado.ioloop.IOLoop` and `tornado.httpclient.HTTPClient` now have ``close()`` methods that should be used in applications that create and destroy many of these objects. * `.IOLoop.install` can now be used to use a custom subclass of IOLoop as the singleton without monkey-patching. * `~tornado.iostream.IOStream` should now always call the close callback instead of the connect callback on a connection error. * The `.IOStream` close callback will no longer be called while there are pending read callbacks that can be satisfied with buffered data. ``tornado.simple_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Now supports client SSL certificates with the ``client_key`` and ``client_cert`` parameters to `tornado.httpclient.HTTPRequest` * Now takes a maximum buffer size, to allow reading files larger than 100MB * Now works with HTTP 1.0 servers that don't send a Content-Length header * The ``allow_nonstandard_methods`` flag on HTTP client requests now permits methods other than ``POST`` and ``PUT`` to contain bodies. * Fixed file descriptor leaks and multiple callback invocations in ``SimpleAsyncHTTPClient`` * No longer consumes extra connection resources when following redirects. * Now works with buggy web servers that separate headers with ``\n`` instead of ``\r\n\r\n``. * Now sets ``response.request_time`` correctly. * Connect timeouts now work correctly. Other modules ~~~~~~~~~~~~~ * `tornado.auth.OpenIdMixin` now uses the correct realm when the callback URI is on a different domain. * `tornado.autoreload` has a new command-line interface which can be used to wrap any script. This replaces the ``--autoreload`` argument to `tornado.testing.main` and is more robust against syntax errors. * `tornado.autoreload.watch` can be used to watch files other than the sources of imported modules. * ``tornado.database.Connection`` has new variants of ``execute`` and ``executemany`` that return the number of rows affected instead of the last inserted row id. * `tornado.locale.load_translations` now accepts any properly-formatted locale name, not just those in the predefined ``LOCALE_NAMES`` list. * `tornado.options.define` now takes a ``group`` parameter to group options in ``--help`` output. * Template loaders now take a ``namespace`` constructor argument to add entries to the template namespace. * `tornado.websocket` now supports the latest ("hybi-10") version of the protocol (the old version, "hixie-76" is still supported; the correct version is detected automatically). * `tornado.websocket` now works on Python 3 Bug fixes ~~~~~~~~~ * Windows support has been improved. Windows is still not an officially supported platform, but the test suite now passes and `tornado.autoreload` works. * Uploading files whose names contain special characters will now work. 
* Cookie values containing special characters are now properly quoted and unquoted. * Multi-line headers are now supported. * Repeated Content-Length headers (which may be added by certain proxies) are now supported in `.HTTPServer`. * Unicode string literals now work in template expressions. * The template ``{% module %}`` directive now works even if applications use a template variable named ``modules``. * Requests with "Expect: 100-continue" now work on python 3 tornado-4.5.3/docs/releases/v2.1.1.rst000066400000000000000000000021361322420601000172730ustar00rootroot00000000000000What's new in Tornado 2.1.1 =========================== Oct 4, 2011 ----------- Bug fixes ~~~~~~~~~ * Fixed handling of closed connections with the ``epoll`` (i.e. Linux) ``IOLoop``. Previously, closed connections could be shut down too early, which most often manifested as "Stream is closed" exceptions in ``SimpleAsyncHTTPClient``. * Fixed a case in which chunked responses could be closed prematurely, leading to truncated output. * ``IOStream.connect`` now reports errors more consistently via logging and the close callback (this affects e.g. connections to localhost on FreeBSD). * ``IOStream.read_bytes`` again accepts both ``int`` and ``long`` arguments. * ``PeriodicCallback`` no longer runs repeatedly when ``IOLoop`` iterations complete faster than the resolution of ``time.time()`` (mainly a problem on Windows). Backwards-compatibility note ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Listening for ``IOLoop.ERROR`` alone is no longer sufficient for detecting closed connections on an otherwise unused socket. ``IOLoop.ERROR`` must always be used in combination with ``READ`` or ``WRITE``. tornado-4.5.3/docs/releases/v2.2.0.rst000066400000000000000000000127161322420601000173000ustar00rootroot00000000000000What's new in Tornado 2.2 ========================= Jan 30, 2012 ------------ Highlights ~~~~~~~~~~ * Updated and expanded WebSocket support. * Improved compatibility in the Twisted/Tornado bridge. * Template errors now generate better stack traces. * Better exception handling in `tornado.gen`. Security fixes ~~~~~~~~~~~~~~ * ``tornado.simple_httpclient`` now disables SSLv2 in all cases. Previously SSLv2 would be allowed if the Python interpreter was linked against a pre-1.0 version of OpenSSL. Backwards-incompatible changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `tornado.process.fork_processes` now raises `SystemExit` if all child processes exit cleanly rather than returning ``None``. The old behavior was surprising and inconsistent with most of the documented examples of this function (which did not check the return value). * On Python 2.6, ``tornado.simple_httpclient`` only supports SSLv3. This is because Python 2.6 does not expose a way to support both SSLv3 and TLSv1 without also supporting the insecure SSLv2. * `tornado.websocket` no longer supports the older "draft 76" version of the websocket protocol by default, although this version can be enabled by overriding ``tornado.websocket.WebSocketHandler.allow_draft76``. ``tornado.httpclient`` ~~~~~~~~~~~~~~~~~~~~~~ * ``SimpleAsyncHTTPClient`` no longer hangs on ``HEAD`` requests, responses with no content, or empty ``POST``/``PUT`` response bodies. * ``SimpleAsyncHTTPClient`` now supports 303 and 307 redirect codes. * ``tornado.curl_httpclient`` now accepts non-integer timeouts. * ``tornado.curl_httpclient`` now supports basic authentication with an empty password. 
``tornado.httpserver``
~~~~~~~~~~~~~~~~~~~~~~

* `.HTTPServer` with ``xheaders=True`` will no longer accept
  ``X-Real-IP`` headers that don't look like valid IP addresses.
* `.HTTPServer` now treats the ``Connection`` request header as
  case-insensitive.

``tornado.ioloop`` and ``tornado.iostream``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``IOStream.write`` now works correctly when given an empty string.
* ``IOStream.read_until`` (and ``read_until_regex``) now perform better
  when there is a lot of buffered data, which improves performance of
  ``SimpleAsyncHTTPClient`` when downloading files with lots of chunks.
* `.SSLIOStream` now works correctly when ``ssl_version`` is set to a
  value other than ``SSLv23``.
* Idle ``IOLoops`` no longer wake up several times a second.
* `tornado.ioloop.PeriodicCallback` no longer triggers duplicate
  callbacks when stopped and started repeatedly.

``tornado.template``
~~~~~~~~~~~~~~~~~~~~

* Exceptions in template code will now show better stack traces that
  reference lines from the original template file.
* ``{#`` and ``#}`` can now be used for comments (and unlike the old
  ``{% comment %}`` directive, these can wrap other template
  directives).
* Template directives may now span multiple lines.

``tornado.web``
~~~~~~~~~~~~~~~

* Now behaves better when given malformed ``Cookie`` headers.
* `.RequestHandler.redirect` now has a ``status`` argument to send
  status codes other than 301 and 302.
* New method `.RequestHandler.on_finish` may be overridden for
  post-request processing (as a counterpart to
  `.RequestHandler.prepare`)
* `.StaticFileHandler` now outputs ``Content-Length`` and ``Etag``
  headers on ``HEAD`` requests.
* `.StaticFileHandler` now has overridable ``get_version`` and
  ``parse_url_path`` methods for use in subclasses.
* `.RequestHandler.static_url` now takes an ``include_host`` parameter
  (in addition to the old support for the
  ``RequestHandler.include_host`` attribute).

``tornado.websocket``
~~~~~~~~~~~~~~~~~~~~~

* Updated to support the latest version of the protocol, as finalized
  in RFC 6455.
* Many bugs were fixed in all supported protocol versions.
* `tornado.websocket` no longer supports the older "draft 76" version
  of the websocket protocol by default, although this version can be
  enabled by overriding
  ``tornado.websocket.WebSocketHandler.allow_draft76``.
* `.WebSocketHandler.write_message` now accepts a ``binary`` argument
  to send binary messages.
* Subprotocols (i.e. the ``Sec-WebSocket-Protocol`` header) are now
  supported; see the `.WebSocketHandler.select_subprotocol` method for
  details.
* ``.WebSocketHandler.get_websocket_scheme`` can be used to select the
  appropriate url scheme (``ws://`` or ``wss://``) in cases where
  ``HTTPRequest.protocol`` is not set correctly.

Other modules
~~~~~~~~~~~~~

* `tornado.auth.TwitterMixin.authenticate_redirect` now takes a
  ``callback_uri`` parameter.
* `tornado.auth.TwitterMixin.twitter_request` now accepts both URLs and
  partial paths (complete URLs are useful for the search API which
  follows different patterns).
* Exception handling in `tornado.gen` has been improved.  It is now
  possible to catch exceptions thrown by a ``Task``.
* `tornado.netutil.bind_sockets` now works when ``getaddrinfo`` returns
  duplicate addresses.
* `tornado.platform.twisted` compatibility has been significantly
  improved.  Twisted version 11.1.0 is now supported in addition to
  11.0.0.
* `tornado.process.fork_processes` correctly reseeds the `random`
  module even when `os.urandom` is not implemented.
* `tornado.testing.main` supports a new flag ``--exception_on_interrupt``, which can be set to false to make ``Ctrl-C`` kill the process more reliably (at the expense of stack traces when it does so). * ``tornado.version_info`` is now a four-tuple so official releases can be distinguished from development branches. tornado-4.5.3/docs/releases/v2.2.1.rst000066400000000000000000000010751322420601000172750ustar00rootroot00000000000000What's new in Tornado 2.2.1 =========================== Apr 23, 2012 ------------ Security fixes ~~~~~~~~~~~~~~ * `tornado.web.RequestHandler.set_header` now properly sanitizes input values to protect against header injection, response splitting, etc. (it has always attempted to do this, but the check was incorrect). Note that redirects, the most likely source of such bugs, are protected by a separate check in `.RequestHandler.redirect`. Bug fixes ~~~~~~~~~ * Colored logging configuration in `tornado.options` is compatible with Python 3.2.3 (and 3.3). tornado-4.5.3/docs/releases/v2.3.0.rst000066400000000000000000000117221322420601000172750ustar00rootroot00000000000000What's new in Tornado 2.3 ========================= May 31, 2012 ------------ HTTP clients ~~~~~~~~~~~~ * `tornado.httpclient.HTTPClient` now supports the same constructor keyword arguments as `.AsyncHTTPClient`. * The ``max_clients`` keyword argument to `.AsyncHTTPClient.configure` now works. * ``tornado.simple_httpclient`` now supports the ``OPTIONS`` and ``PATCH`` HTTP methods. * ``tornado.simple_httpclient`` is better about closing its sockets instead of leaving them for garbage collection. * ``tornado.simple_httpclient`` correctly verifies SSL certificates for URLs containing IPv6 literals (This bug affected Python 2.5 and 2.6). * ``tornado.simple_httpclient`` no longer includes basic auth credentials in the ``Host`` header when those credentials are extracted from the URL. * ``tornado.simple_httpclient`` no longer modifies the caller-supplied header dictionary, which caused problems when following redirects. * ``tornado.curl_httpclient`` now supports client SSL certificates (using the same ``client_cert`` and ``client_key`` arguments as ``tornado.simple_httpclient``) HTTP Server ~~~~~~~~~~~ * `.HTTPServer` now works correctly with paths starting with ``//`` * ``HTTPHeaders.copy`` (inherited from `dict.copy`) now works correctly. * ``HTTPConnection.address`` is now always the socket address, even for non-IP sockets. ``HTTPRequest.remote_ip`` is still always an IP-style address (fake data is used for non-IP sockets) * Extra data at the end of multipart form bodies is now ignored, which fixes a compatibility problem with an iOS HTTP client library. ``IOLoop`` and ``IOStream`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `.IOStream` now has an ``error`` attribute that can be used to determine why a socket was closed. * ``tornado.iostream.IOStream.read_until`` and ``read_until_regex`` are much faster with large input. * ``IOStream.write`` performs better when given very large strings. * `.IOLoop.instance()` is now thread-safe. ``tornado.options`` ~~~~~~~~~~~~~~~~~~~ * `tornado.options` options with ``multiple=True`` that are set more than once now overwrite rather than append. This makes it possible to override values set in ``parse_config_file`` with ``parse_command_line``. * `tornado.options` ``--help`` output is now prettier. * `tornado.options.options` now supports attribute assignment. 
``tornado.template`` ~~~~~~~~~~~~~~~~~~~~ * Template files containing non-ASCII (utf8) characters now work on Python 3 regardless of the locale environment variables. * Templates now support ``else`` clauses in ``try``/``except``/``finally``/``else`` blocks. ``tornado.web`` ~~~~~~~~~~~~~~~ * `tornado.web.RequestHandler` now supports the ``PATCH`` HTTP method. Note that this means any existing methods named ``patch`` in ``RequestHandler`` subclasses will need to be renamed. * `tornado.web.addslash` and ``removeslash`` decorators now send permanent redirects (301) instead of temporary (302). * `.RequestHandler.flush` now invokes its callback whether there was any data to flush or not. * Repeated calls to `.RequestHandler.set_cookie` with the same name now overwrite the previous cookie instead of producing additional copies. * ``tornado.web.OutputTransform.transform_first_chunk`` now takes and returns a status code in addition to the headers and chunk. This is a backwards-incompatible change to an interface that was never technically private, but was not included in the documentation and does not appear to have been used outside Tornado itself. * Fixed a bug on python versions before 2.6.5 when `tornado.web.URLSpec` regexes are constructed from unicode strings and keyword arguments are extracted. * The ``reverse_url`` function in the template namespace now comes from the `.RequestHandler` rather than the `.Application`. (Unless overridden, `.RequestHandler.reverse_url` is just an alias for the `.Application` method). * The ``Etag`` header is now returned on 304 responses to an ``If-None-Match`` request, improving compatibility with some caches. * `tornado.web` will no longer produce responses with status code 304 that also have entity headers such as ``Content-Length``. Other modules ~~~~~~~~~~~~~ * `tornado.auth.FacebookGraphMixin` no longer sends ``post_args`` redundantly in the url. * The ``extra_params`` argument to `tornado.escape.linkify` may now be a callable, to allow parameters to be chosen separately for each link. * `tornado.gen` no longer leaks ``StackContexts`` when a ``@gen.engine`` wrapped function is called repeatedly. * `tornado.locale.get_supported_locales` no longer takes a meaningless ``cls`` argument. * `.StackContext` instances now have a deactivation callback that can be used to prevent further propagation. * `tornado.testing.AsyncTestCase.wait` now resets its timeout on each call. * `tornado.wsgi.WSGIApplication` now parses arguments correctly on Python 3. * Exception handling on Python 3 has been improved; previously some exceptions such as `UnicodeDecodeError` would generate ``TypeErrors`` tornado-4.5.3/docs/releases/v2.4.0.rst000066400000000000000000000055041322420601000172770ustar00rootroot00000000000000What's new in Tornado 2.4 ========================= Sep 4, 2012 ----------- General ~~~~~~~ * Fixed Python 3 bugs in `tornado.auth`, `tornado.locale`, and `tornado.wsgi`. HTTP clients ~~~~~~~~~~~~ * Removed ``max_simultaneous_connections`` argument from `tornado.httpclient` (both implementations). This argument hasn't been useful for some time (if you were using it you probably want ``max_clients`` instead) * ``tornado.simple_httpclient`` now accepts and ignores HTTP 1xx status responses. `tornado.ioloop` and `tornado.iostream` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Fixed a bug introduced in 2.3 that would cause `.IOStream` close callbacks to not run if there were pending reads. * Improved error handling in `.SSLIOStream` and SSL-enabled `.TCPServer`. 
* ``SSLIOStream.get_ssl_certificate`` now has a ``binary_form``
  argument which is passed to ``SSLSocket.getpeercert``.
* ``SSLIOStream.write`` can now be called while the connection is in
  progress, same as non-SSL `.IOStream` (but be careful not to send
  sensitive data until the connection has completed and the certificate
  has been verified).
* `.IOLoop.add_handler` cannot be called more than once with the same
  file descriptor.  This was always true for ``epoll``, but now the
  other implementations enforce it too.
* On Windows, `.TCPServer` uses ``SO_EXCLUSIVEADDRUSER`` instead of
  ``SO_REUSEADDR``.

`tornado.template`
~~~~~~~~~~~~~~~~~~

* ``{% break %}`` and ``{% continue %}`` can now be used in looping
  constructs in templates.
* It is no longer an error for an if/else/for/etc block in a template
  to have an empty body.

`tornado.testing`
~~~~~~~~~~~~~~~~~

* New class `tornado.testing.AsyncHTTPSTestCase` is like
  `.AsyncHTTPTestCase` but enables SSL for the testing server (by
  default using a self-signed testing certificate).
* `tornado.testing.main` now accepts additional keyword arguments and
  forwards them to `unittest.main`.

`tornado.web`
~~~~~~~~~~~~~

* New method `.RequestHandler.get_template_namespace` can be overridden
  to add additional variables without modifying keyword arguments to
  ``render_string``.
* `.RequestHandler.add_header` now works with `.WSGIApplication`.
* `.RequestHandler.get_secure_cookie` now handles a potential error
  case.
* ``RequestHandler.__init__`` now calls ``super().__init__`` to ensure
  that all constructors are called when multiple inheritance is used.
* Docs have been updated with a description of all available
  :py:attr:`Application settings `.

Other modules
~~~~~~~~~~~~~

* `.OAuthMixin` now accepts ``"oob"`` as a ``callback_uri``.
* `.OpenIdMixin` now also returns the ``claimed_id`` field for the
  user.
* `tornado.platform.twisted` shutdown sequence is now more compatible.
* The logging configuration used in `tornado.options` is now more
  tolerant of non-ascii byte strings.
tornado-4.5.3/docs/releases/v2.4.1.rst000066400000000000000000000007211322420601000172740ustar00rootroot00000000000000What's new in Tornado 2.4.1
===========================

Nov 24, 2012
------------

Bug fixes
~~~~~~~~~

* Fixed a memory leak in `tornado.stack_context` that was especially
  likely with long-running ``@gen.engine`` functions.
* `tornado.auth.TwitterMixin` now works on Python 3.
* Fixed a bug in which ``IOStream.read_until_close`` with a streaming
  callback would sometimes pass the last chunk of data to the final
  callback instead of the streaming callback.
tornado-4.5.3/docs/releases/v3.0.0.rst000066400000000000000000000511241322420601000172730ustar00rootroot00000000000000What's new in Tornado 3.0
=========================

Mar 29, 2013
------------

Highlights
^^^^^^^^^^

* The ``callback`` argument to many asynchronous methods is now
  optional, and these methods return a `.Future`.  The `tornado.gen`
  module now understands ``Futures``, and these methods can be used
  directly without a `.gen.Task` wrapper.
* New function `.IOLoop.current` returns the `.IOLoop` that is running
  on the current thread (as opposed to `.IOLoop.instance`, which
  returns a specific thread's (usually the main thread's) IOLoop).
* New class `tornado.netutil.Resolver` provides an asynchronous
  interface to DNS resolution.
  The default implementation is still blocking, but non-blocking
  implementations are available using one of three optional dependencies:
  `~tornado.netutil.ThreadedResolver` using the `concurrent.futures`
  thread pool, ``tornado.platform.caresresolver.CaresResolver`` using the
  ``pycares`` library, or ``tornado.platform.twisted.TwistedResolver``
  using ``twisted``.
* Tornado's logging is now less noisy, and it no longer goes directly to
  the root logger, allowing for finer-grained configuration.
* New class `tornado.process.Subprocess` wraps `subprocess.Popen` with
  `.PipeIOStream` access to the child's file descriptors.
* `.IOLoop` now has a static `configure <.Configurable.configure>` method
  like the one on `.AsyncHTTPClient`, which can be used to select an
  `.IOLoop` implementation other than the default.
* `.IOLoop` can now optionally use a monotonic clock if available (see
  below for more details).

Backwards-incompatible changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

* Python 2.5 is no longer supported.  Python 3 is now supported in a
  single codebase instead of using ``2to3``.
* The ``tornado.database`` module has been removed.  It is now available
  as a separate package, ``torndb``.
* Functions that take an ``io_loop`` parameter now default to
  `.IOLoop.current()` instead of `.IOLoop.instance()`.
* Empty HTTP request arguments are no longer ignored.  This applies to
  ``HTTPRequest.arguments`` and ``RequestHandler.get_argument[s]`` in
  WSGI and non-WSGI modes.
* On Python 3, `tornado.escape.json_encode` no longer accepts byte
  strings.
* On Python 3, the ``get_authenticated_user`` methods in `tornado.auth`
  now return character strings instead of byte strings.
* ``tornado.netutil.TCPServer`` has moved to its own module,
  `tornado.tcpserver`.
* The Tornado test suite now requires ``unittest2`` when run on
  Python 2.6.
* `tornado.options.options` is no longer a subclass of `dict`;
  attribute-style access is now required.

Detailed changes by module
^^^^^^^^^^^^^^^^^^^^^^^^^^

Multiple modules
~~~~~~~~~~~~~~~~

* Tornado no longer logs to the root logger.  Details on the new logging
  scheme can be found under the `tornado.log` module.  Note that in some
  cases this will require that you add an explicit logging configuration
  in order to see any output (perhaps just calling
  ``logging.basicConfig()``), although both `.IOLoop.start()` and
  `tornado.options.parse_command_line` will do this for you.
* On python 3.2+, methods that take an ``ssl_options`` argument (on
  `.SSLIOStream`, `.TCPServer`, and `.HTTPServer`) now accept either a
  dictionary of options or an `ssl.SSLContext` object.
* New optional dependency on `concurrent.futures` to provide better
  support for working with threads.  `concurrent.futures` is in the
  standard library for Python 3.2+, and can be installed on older
  versions with ``pip install futures``.

`tornado.autoreload`
~~~~~~~~~~~~~~~~~~~~

* `tornado.autoreload` is now more reliable when there are errors at
  import time.
* Calling `tornado.autoreload.start` (or creating an `.Application` with
  ``debug=True``) twice on the same `.IOLoop` now does nothing (instead
  of creating multiple periodic callbacks).  Starting autoreload on more
  than one `.IOLoop` in the same process now logs a warning.
* Scripts run by autoreload no longer inherit ``__future__`` imports used
  by Tornado.

`tornado.auth`
~~~~~~~~~~~~~~

* On Python 3, the ``get_authenticated_user`` method family now returns
  character strings instead of byte strings.
* Asynchronous methods defined in `tornado.auth` now return a `.Future`,
  and their ``callback`` argument is optional.  The ``Future`` interface
  is preferred as it offers better error handling (the previous interface
  just logged a warning and returned None).
* The `tornado.auth` mixin classes now define a method
  ``get_auth_http_client``, which can be overridden to use a non-default
  `.AsyncHTTPClient` instance (e.g. to use a different `.IOLoop`).
* Subclasses of `.OAuthMixin` are encouraged to override
  `.OAuthMixin._oauth_get_user_future` instead of ``_oauth_get_user``,
  although both methods are still supported.

`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

* New module `tornado.concurrent` contains code to support working with
  `concurrent.futures`, or to emulate a future-based interface when that
  module is not available.

``tornado.curl_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Preliminary support for ``tornado.curl_httpclient`` on Python 3.  The
  latest official release of pycurl only supports Python 2, but Ubuntu
  has a port available in 12.10 (``apt-get install python3-pycurl``).
  This port currently has bugs that prevent it from handling arbitrary
  binary data but it should work for textual (utf8) resources.
* Fix a crash with libcurl 7.29.0 if a curl object is created and closed
  without being used.

`tornado.escape`
~~~~~~~~~~~~~~~~

* On Python 3, `~tornado.escape.json_encode` no longer accepts byte
  strings.  This mirrors the behavior of the underlying json module.
  Python 2 behavior is unchanged but should be faster.

`tornado.gen`
~~~~~~~~~~~~~

* New decorator ``@gen.coroutine`` is available as an alternative to
  ``@gen.engine``.  It automatically returns a `.Future`, and within the
  function instead of calling a callback you return a value with
  ``raise gen.Return(value)`` (or simply ``return value`` in Python 3.3).
* Generators may now yield `.Future` objects.
* Callbacks produced by `.gen.Callback` and `.gen.Task` are now
  automatically stack-context-wrapped, to minimize the risk of context
  leaks when used with asynchronous functions that don't do their own
  wrapping.
* Fixed a memory leak involving generators, `.RequestHandler.flush`, and
  clients closing connections while output is being written.
* Yielding a large list no longer has quadratic performance.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* `.AsyncHTTPClient.fetch` now returns a `.Future` and its callback
  argument is optional.  When the future interface is used, any error
  will be raised automatically, as if `.HTTPResponse.rethrow` was called.
* `.AsyncHTTPClient.configure` and all `.AsyncHTTPClient` constructors
  now take a ``defaults`` keyword argument.  This argument should be a
  dictionary, and its values will be used in place of corresponding
  attributes of `~tornado.httpclient.HTTPRequest` that are not set.
* All unset attributes of `tornado.httpclient.HTTPRequest` are now
  ``None``.  The default values of some attributes (``connect_timeout``,
  ``request_timeout``, ``follow_redirects``, ``max_redirects``,
  ``use_gzip``, ``proxy_password``, ``allow_nonstandard_methods``, and
  ``validate_cert``) have been moved from
  `~tornado.httpclient.HTTPRequest` to the client implementations.
* The ``max_clients`` argument to `.AsyncHTTPClient` is now a
  keyword-only argument.
* Keyword arguments to `.AsyncHTTPClient.configure` are no longer used
  when instantiating an implementation subclass directly.
* Secondary `.AsyncHTTPClient` callbacks (``streaming_callback``,
  ``header_callback``, and ``prepare_curl_callback``) now respect
  `.StackContext`.
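A short sketch of the new ``Future`` interface (the URL is a
placeholder):

.. code-block:: python

    from tornado import gen, httpclient, ioloop

    @gen.coroutine
    def main():
        client = httpclient.AsyncHTTPClient()
        # With no callback argument, fetch() returns a Future; any HTTP
        # error is raised here as if HTTPResponse.rethrow had been
        # called.
        response = yield client.fetch("http://example.com/")
        print(response.code)

    ioloop.IOLoop.instance().run_sync(main)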
`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* `.HTTPServer` no longer logs an error when it is unable to read a
  second request from an HTTP 1.1 keep-alive connection.
* `.HTTPServer` now takes a ``protocol`` keyword argument which can be
  set to ``https`` if the server is behind an SSL-decoding proxy that
  does not set any supported X-headers.
* ``tornado.httpserver.HTTPConnection`` now has a ``set_close_callback``
  method that should be used instead of reaching into its ``stream``
  attribute.
* Empty HTTP request arguments are no longer ignored.  This applies to
  ``HTTPRequest.arguments`` and ``RequestHandler.get_argument[s]`` in
  WSGI and non-WSGI modes.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* New function `.IOLoop.current` returns the ``IOLoop`` that is running
  on the current thread (as opposed to `.IOLoop.instance`, which returns
  a specific thread's (usually the main thread's) IOLoop).
* New method `.IOLoop.add_future` to run a callback on the IOLoop when an
  asynchronous `.Future` finishes.
* `.IOLoop` now has a static `configure <.Configurable.configure>` method
  like the one on `.AsyncHTTPClient`, which can be used to select an
  `.IOLoop` implementation other than the default.
* The `.IOLoop` poller implementations (``select``, ``epoll``,
  ``kqueue``) are now available as distinct subclasses of `.IOLoop`.
  Instantiating `.IOLoop` will continue to automatically choose the best
  available implementation.
* The `.IOLoop` constructor has a new keyword argument ``time_func``,
  which can be used to set the time function used when scheduling
  callbacks.  This is most useful with the `time.monotonic` function,
  introduced in Python 3.3 and backported to older versions via the
  ``monotime`` module.  Using a monotonic clock here avoids problems when
  the system clock is changed.
* New function `.IOLoop.time` returns the current time according to the
  IOLoop.  To use the new monotonic clock functionality, all calls to
  `.IOLoop.add_timeout` must either pass a `datetime.timedelta` or a time
  relative to `.IOLoop.time`, not `time.time`.  (`time.time` will
  continue to work only as long as the IOLoop's ``time_func`` argument is
  not used).
* New convenience method `.IOLoop.run_sync` can be used to start an
  IOLoop just long enough to run a single coroutine.
* New method `.IOLoop.add_callback_from_signal` is safe to use in a
  signal handler (the regular `.add_callback` method may deadlock).
* `.IOLoop` now uses `signal.set_wakeup_fd` where available (Python 2.6+
  on Unix) to avoid a race condition that could result in Python signal
  handlers being delayed.
* Method ``IOLoop.running()`` has been removed.
* `.IOLoop` has been refactored to better support subclassing.
* `.IOLoop.add_callback` and `.add_callback_from_signal` now take
  ``*args, **kwargs`` to pass along to the callback.

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

* `.IOStream.connect` now has an optional ``server_hostname`` argument
  which will be used for SSL certificate validation when applicable.
  Additionally, when supported (on Python 3.2+), this hostname will be
  sent via SNI (and this is supported by ``tornado.simple_httpclient``).
* Much of `.IOStream` has been refactored into a separate class
  `.BaseIOStream`.
* New class `tornado.iostream.PipeIOStream` provides the IOStream
  interface on pipe file descriptors.
* `.IOStream` now raises a new exception
  ``tornado.iostream.StreamClosedError`` when you attempt to read or
  write after the stream has been closed (by either side).
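  For example, a coroutine-style read loop (a sketch; ``stream`` is
  assumed to be a connected `.IOStream`, and ``gen.Task`` adapts the
  callback-style ``read_until`` for use with ``yield``):

  .. code-block:: python

      from tornado import gen
      from tornado.iostream import StreamClosedError

      @gen.coroutine
      def read_loop(stream):
          try:
              while True:
                  # StreamClosedError is raised if the stream has been
                  # closed (by either side) when we try to read again.
                  line = yield gen.Task(stream.read_until, b"\n")
                  print(line)
          except StreamClosedError:
              pass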
* `.IOStream` now simply closes the connection when it gets an
  ``ECONNRESET`` error, rather than logging it as an error.
* ``IOStream.error`` no longer picks up unrelated exceptions.
* `.BaseIOStream.close` now has an ``exc_info`` argument (similar to the
  one used in the `logging` module) that can be used to set the stream's
  ``error`` attribute when closing it.
* `.BaseIOStream.read_until_close` now works correctly when it is called
  while there is buffered data.
* Fixed a major performance regression when run on PyPy (introduced in
  Tornado 2.3).

`tornado.log`
~~~~~~~~~~~~~

* New module containing `.enable_pretty_logging` and `.LogFormatter`,
  moved from the options module.
* `.LogFormatter` now handles non-ascii data in messages and tracebacks
  better.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

* New class `tornado.netutil.Resolver` provides an asynchronous interface
  to DNS resolution.  The default implementation is still blocking, but
  non-blocking implementations are available using one of three optional
  dependencies: `~tornado.netutil.ThreadedResolver` using the
  `concurrent.futures` thread pool,
  `tornado.platform.caresresolver.CaresResolver` using the ``pycares``
  library, or `tornado.platform.twisted.TwistedResolver` using
  ``twisted``.
* New function `tornado.netutil.is_valid_ip` returns true if a given
  string is a valid IP (v4 or v6) address.
* `tornado.netutil.bind_sockets` has a new ``flags`` argument that can be
  used to pass additional flags to ``getaddrinfo``.
* `tornado.netutil.bind_sockets` no longer sets ``AI_ADDRCONFIG``; this
  will cause it to bind to both ipv4 and ipv6 more often than before.
* `tornado.netutil.bind_sockets` now works when Python was compiled with
  ``--disable-ipv6`` but IPv6 DNS resolution is available on the system.
* ``tornado.netutil.TCPServer`` has moved to its own module,
  `tornado.tcpserver`.

`tornado.options`
~~~~~~~~~~~~~~~~~

* The class underlying the functions in `tornado.options` is now public
  (`tornado.options.OptionParser`).  This can be used to create multiple
  independent option sets, such as for subcommands.
* `tornado.options.parse_config_file` now configures logging
  automatically by default, in the same way that
  `~tornado.options.parse_command_line` does.
* New function `tornado.options.add_parse_callback` schedules a callback
  to be run after the command line or config file has been parsed.  The
  keyword argument ``final=False`` can be used on either parsing function
  to suppress these callbacks.
* `tornado.options.define` now takes a ``callback`` argument.  This
  callback will be run with the new value whenever the option is changed.
  This is especially useful for options that set other options, such as
  by reading from a config file.
* `tornado.options.parse_command_line` ``--help`` output now goes to
  ``stderr`` rather than ``stdout``.
* `tornado.options.options` is no longer a subclass of `dict`;
  attribute-style access is now required.
* `tornado.options.options` (and `.OptionParser` instances generally) now
  have a `.mockable()` method that returns a wrapper object compatible
  with ``mock.patch``.
* Function ``tornado.options.enable_pretty_logging`` has been moved to
  the `tornado.log` module.

`tornado.platform.caresresolver`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* New module containing an asynchronous implementation of the `.Resolver`
  interface, using the ``pycares`` library.
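The new resolver implementations are selected with the standard
``configure`` mechanism; a minimal sketch:

.. code-block:: python

    from tornado.netutil import Resolver

    # Use the thread-pool-backed implementation instead of the default
    # blocking one; the pycares- and twisted-based classes can be named
    # the same way when those optional dependencies are installed.
    Resolver.configure('tornado.netutil.ThreadedResolver')

    # Subsequent instantiations return the configured implementation.
    resolver = Resolver()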
`tornado.platform.twisted`
~~~~~~~~~~~~~~~~~~~~~~~~~~

* New class `tornado.platform.twisted.TwistedIOLoop` allows Tornado code
  to be run on the Twisted reactor (as opposed to the existing
  `.TornadoReactor`, which bridges the gap in the other direction).
* New class `tornado.platform.twisted.TwistedResolver` is an asynchronous
  implementation of the `.Resolver` interface.

`tornado.process`
~~~~~~~~~~~~~~~~~

* New class `tornado.process.Subprocess` wraps `subprocess.Popen` with
  `.PipeIOStream` access to the child's file descriptors.

``tornado.simple_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``SimpleAsyncHTTPClient`` now takes a ``resolver`` keyword argument
  (which may be passed to either the constructor or `configure
  <.Configurable.configure>`), to allow it to use the new non-blocking
  `tornado.netutil.Resolver`.
* When following redirects, ``SimpleAsyncHTTPClient`` now treats a 302
  response code the same as a 303.  This is contrary to the HTTP spec but
  consistent with all browsers and other major HTTP clients (including
  ``CurlAsyncHTTPClient``).
* The behavior of ``header_callback`` with ``SimpleAsyncHTTPClient`` has
  changed and is now the same as that of ``CurlAsyncHTTPClient``.  The
  header callback now receives the first line of the response (e.g.
  ``HTTP/1.0 200 OK``) and the final empty line.
* ``tornado.simple_httpclient`` now accepts responses with a 304 status
  code that include a ``Content-Length`` header.
* Fixed a bug in which ``SimpleAsyncHTTPClient`` callbacks were being run
  in the client's ``stack_context``.

`tornado.stack_context`
~~~~~~~~~~~~~~~~~~~~~~~

* `.stack_context.wrap` now runs the wrapped callback in a more
  consistent environment by recreating contexts even if they already
  exist on the stack.
* Fixed a bug in which stack contexts could leak from one callback chain
  to another.
* Yield statements inside a ``with`` statement can cause stack contexts
  to become inconsistent; an exception will now be raised when this case
  is detected.

`tornado.template`
~~~~~~~~~~~~~~~~~~

* Errors while rendering templates no longer log the generated code,
  since the enhanced stack traces (from version 2.1) should make this
  unnecessary.
* The ``{% apply %}`` directive now works properly with functions that
  return both unicode strings and byte strings (previously only byte
  strings were supported).
* Code in templates is no longer affected by Tornado's ``__future__``
  imports (which previously included ``absolute_import`` and
  ``division``).

`tornado.testing`
~~~~~~~~~~~~~~~~~

* New function `tornado.testing.bind_unused_port` both chooses a port and
  binds a socket to it, so there is no risk of another process using the
  same port.  ``get_unused_port`` is now deprecated.
* New decorator `tornado.testing.gen_test` can be used to allow for
  yielding `tornado.gen` objects in tests, as an alternative to the
  ``stop`` and ``wait`` methods of `.AsyncTestCase`.
* `tornado.testing.AsyncTestCase` and friends now extend
  ``unittest2.TestCase`` when it is available (and continue to use the
  standard ``unittest`` module when ``unittest2`` is not available).
* `tornado.testing.ExpectLog` can be used as a finer-grained alternative
  to `tornado.testing.LogTrapTestCase`.
* The command-line interface to `tornado.testing.main` now supports
  additional arguments from the underlying `unittest` module:
  ``verbose``, ``quiet``, ``failfast``, ``catch``, ``buffer``.
* The deprecated ``--autoreload`` option of `tornado.testing.main` has
  been removed.  Use ``python -m tornado.autoreload`` as a prefix command
  instead.
* The ``--httpclient`` option of `tornado.testing.main` has been moved to
  ``tornado.test.runtests`` so as not to pollute the application option
  namespace.  The `tornado.options` module's new callback support now
  makes it easy to add options from a wrapper script instead of putting
  all possible options in `tornado.testing.main`.
* `.AsyncHTTPTestCase` no longer calls `.AsyncHTTPClient.close` for tests
  that use the singleton `.IOLoop.instance`.
* `.LogTrapTestCase` no longer fails when run in unknown logging
  configurations.  This allows tests to be run under nose, which does its
  own log buffering (`.LogTrapTestCase` doesn't do anything useful in
  this case, but at least it doesn't break things any more).

``tornado.util``
~~~~~~~~~~~~~~~~

* ``tornado.util.b`` (which was only intended for internal use) is gone.

`tornado.web`
~~~~~~~~~~~~~

* `.RequestHandler.set_header` now overwrites previous header values
  case-insensitively.
* `tornado.web.RequestHandler` has new attributes ``path_args`` and
  ``path_kwargs``, which contain the positional and keyword arguments
  that are passed to the ``get``/``post``/etc method.  These attributes
  are set before those methods are called, so they are available during
  ``prepare()``.
* `tornado.web.ErrorHandler` no longer requires XSRF tokens on ``POST``
  requests, so posts to an unknown url will always return 404 instead of
  complaining about XSRF tokens.
* Several methods related to HTTP status codes now take a ``reason``
  keyword argument to specify an alternate "reason" string (i.e. the
  "Not Found" in "HTTP/1.1 404 Not Found").  It is now possible to set
  status codes other than those defined in the spec, as long as a reason
  string is given.
* The ``Date`` HTTP header is now set by default on all responses.
* ``Etag``/``If-None-Match`` requests now work with `.StaticFileHandler`.
* `.StaticFileHandler` no longer sets ``Cache-Control: public``
  unnecessarily.
* When gzip is enabled in a `tornado.web.Application`, appropriate
  ``Vary: Accept-Encoding`` headers are now sent.
* It is no longer necessary to pass all handlers for a host in a single
  `.Application.add_handlers` call.  Now the request will be matched
  against the handlers for any ``host_pattern`` that includes the
  request's ``Host`` header.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

* Client-side WebSocket support is now available:
  `tornado.websocket.websocket_connect`.
* `.WebSocketHandler` has new methods `~.WebSocketHandler.ping` and
  `~.WebSocketHandler.on_pong` to send pings to the browser (not
  supported on the ``draft76`` protocol).

What's new in Tornado 3.0.1
===========================

Apr 8, 2013
-----------

* The interface of `tornado.auth.FacebookGraphMixin` is now consistent
  with its documentation and the rest of the module.  The
  ``get_authenticated_user`` and ``facebook_request`` methods return a
  ``Future`` and the ``callback`` argument is optional.
* The `tornado.testing.gen_test` decorator will no longer be recognized
  as a (broken) test by ``nose``.
* Work around a bug in Ubuntu 13.04 betas involving an incomplete
  backport of the `ssl.match_hostname` function.
* `tornado.websocket.websocket_connect` now fails cleanly when it
  attempts to connect to a non-websocket url.
* `tornado.testing.LogTrapTestCase` once again works with byte strings
  on Python 2.
* The ``request`` attribute of `tornado.httpclient.HTTPResponse` is now
  always an `~tornado.httpclient.HTTPRequest`, never a ``_RequestProxy``.
* Exceptions raised by the `tornado.gen` module now have better messages
  when tuples are used as callback keys.

What's new in Tornado 3.0.2
===========================

Jun 2, 2013
-----------

* `tornado.auth.TwitterMixin` now defaults to version 1.1 of the Twitter
  API, instead of version 1.0 which is being discontinued on June 11.  It
  also now uses HTTPS when talking to Twitter.
* Fixed a potential memory leak with a long chain of `.gen.coroutine` or
  `.gen.engine` functions.

What's new in Tornado 3.1
=========================

Jun 15, 2013
------------

Multiple modules
~~~~~~~~~~~~~~~~

* Many reference cycles have been broken up throughout the package,
  allowing for more efficient garbage collection on CPython.
* Silenced some log messages when connections are opened and immediately
  closed (i.e. port scans), or other situations related to closed
  connections.
* Various small speedups: `.HTTPHeaders` case normalization, `.UIModule`
  proxy objects, precompile some regexes.

`tornado.auth`
~~~~~~~~~~~~~~

* `~tornado.auth.OAuthMixin` always sends ``oauth_version=1.0`` in its
  request as required by the spec.
* `~tornado.auth.FacebookGraphMixin` now uses ``self._FACEBOOK_BASE_URL``
  in `~.FacebookGraphMixin.facebook_request` to allow the base url to be
  overridden.
* The ``authenticate_redirect`` and ``authorize_redirect`` methods in the
  `tornado.auth` mixin classes all now return Futures.  These methods are
  asynchronous in `.OAuthMixin` and derived classes, although they do not
  take a callback.  The `.Future` these methods return must be yielded if
  they are called from a function decorated with `.gen.coroutine` (but
  not `.gen.engine`).
* `.TwitterMixin` now uses ``/account/verify_credentials`` to get
  information about the logged-in user, which is more robust against
  changing screen names.
* The ``demos`` directory (in the source distribution) has a new
  ``twitter`` demo using `.TwitterMixin`.

`tornado.escape`
~~~~~~~~~~~~~~~~

* `.url_escape` and `.url_unescape` have a new ``plus`` argument
  (defaulting to True for consistency with the previous behavior) which
  specifies whether they work like `urllib.parse.unquote` or
  `urllib.parse.unquote_plus`.

`tornado.gen`
~~~~~~~~~~~~~

* Fixed a potential memory leak with long chains of `tornado.gen`
  coroutines.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* `tornado.httpclient.HTTPRequest` takes a new argument ``auth_mode``,
  which can be either ``basic`` or ``digest``.  Digest authentication is
  only supported with ``tornado.curl_httpclient``.
* ``tornado.curl_httpclient`` no longer goes into an infinite loop when
  pycurl returns a negative timeout.
* ``curl_httpclient`` now supports the ``PATCH`` and ``OPTIONS`` methods
  without the use of ``allow_nonstandard_methods=True``.
* Worked around a class of bugs in libcurl that would result in errors
  from `.IOLoop.update_handler` in various scenarios including digest
  authentication and socks proxies.
* The ``TCP_NODELAY`` flag is now set when appropriate in
  ``simple_httpclient``.
* ``simple_httpclient`` no longer logs exceptions, since those exceptions
  are made available to the caller as ``HTTPResponse.error``.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* `tornado.httpserver.HTTPServer` handles malformed HTTP headers more
  gracefully.
* `.HTTPServer` now supports lists of IPs in ``X-Forwarded-For`` (it
  chooses the last, i.e. nearest one).
* Memory is now reclaimed promptly on CPython when an HTTP request fails
  because it exceeded the maximum upload size.
* The ``TCP_NODELAY`` flag is now set when appropriate in `.HTTPServer`.
* The `.HTTPServer` ``no_keep_alive`` option is now respected with HTTP
  1.0 connections that explicitly pass ``Connection: keep-alive``.
* The ``Connection: keep-alive`` check for HTTP 1.0 connections is now
  case-insensitive.
* The `str` and `repr` of ``tornado.httpserver.HTTPRequest`` no longer
  include the request body, reducing log spam on errors (and potential
  exposure/retention of private data).

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

* The cache used in `.HTTPHeaders` will no longer grow without bound.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* Some `.IOLoop` implementations (such as ``pyzmq``) accept objects other
  than integer file descriptors; these objects will now have their
  ``.close()`` method called when the `.IOLoop` is closed with
  ``all_fds=True``.
* The stub handles left behind by `.IOLoop.remove_timeout` will now get
  cleaned up instead of waiting to expire.

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

* Fixed a bug in `.BaseIOStream.read_until_close` that would sometimes
  cause data to be passed to the final callback instead of the streaming
  callback.
* The `.IOStream` close callback is now run more reliably if there is an
  exception in ``_try_inline_read``.
* New method `.BaseIOStream.set_nodelay` can be used to set the
  ``TCP_NODELAY`` flag.
* Fixed a case where errors in ``SSLIOStream.connect`` (and
  ``SimpleAsyncHTTPClient``) were not being reported correctly.

`tornado.locale`
~~~~~~~~~~~~~~~~

* `.Locale.format_date` now works on Python 3.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

* The default `.Resolver` implementation now works on Solaris.
* `.Resolver` now has a `~.Resolver.close` method.
* Fixed a potential CPU DoS when ``tornado.netutil.ssl_match_hostname``
  is used on certificates with an abusive wildcard pattern.
* All instances of `.ThreadedResolver` now share a single thread pool,
  whose size is set by the first one to be created (or the static
  ``Resolver.configure`` method).
* `.ExecutorResolver` is now documented for public use.
* `.bind_sockets` now works in configurations with incomplete IPv6
  support.

`tornado.options`
~~~~~~~~~~~~~~~~~

* `tornado.options.define` with ``multiple=True`` now works on Python 3.
* `tornado.options.options` and other `.OptionParser` instances support
  some new dict-like methods: `~.OptionParser.items()`, iteration over
  keys, and (read-only) access to options with square bracket syntax.
  `.OptionParser.group_dict` returns all options with a given group name,
  and `.OptionParser.as_dict` returns all options.

`tornado.process`
~~~~~~~~~~~~~~~~~

* `tornado.process.Subprocess` no longer leaks file descriptors into the
  child process, which fixes a problem in which the child could not
  detect that the parent process had closed its stdin pipe.
* `.Subprocess.set_exit_callback` now works for subprocesses created
  without an explicit ``io_loop`` parameter.

`tornado.stack_context`
~~~~~~~~~~~~~~~~~~~~~~~

* `tornado.stack_context` has been rewritten and is now much faster.
* New function `.run_with_stack_context` facilitates the use of stack
  contexts with coroutines.

`tornado.tcpserver`
~~~~~~~~~~~~~~~~~~~

* The constructors of `.TCPServer` and `.HTTPServer` now take a
  ``max_buffer_size`` keyword argument.
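  For instance (a sketch; ``app`` is assumed to be an existing
  `.Application` and the 10MB limit is arbitrary):

  .. code-block:: python

      from tornado.httpserver import HTTPServer

      # Cap buffered inbound data per connection at 10MB; a client that
      # sends more than this before it is consumed has its connection
      # closed instead of consuming unbounded server memory.
      server = HTTPServer(app, max_buffer_size=10 * 1024 * 1024)
      server.listen(8888)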
`tornado.template`
~~~~~~~~~~~~~~~~~~

* Some internal names used by the template system have been changed; now
  all "reserved" names in templates start with ``_tt_``.

`tornado.testing`
~~~~~~~~~~~~~~~~~

* `tornado.testing.AsyncTestCase.wait` now raises the correct exception
  when it has been modified by `tornado.stack_context`.
* `tornado.testing.gen_test` can now be called as
  ``@gen_test(timeout=60)`` to give some tests a longer timeout than
  others.
* The environment variable ``ASYNC_TEST_TIMEOUT`` can now be set to
  override the default timeout for `.AsyncTestCase.wait` and `.gen_test`.
* `.bind_unused_port` now passes ``None`` instead of ``0`` as the port to
  ``getaddrinfo``, which works better with some unusual network
  configurations.

`tornado.util`
~~~~~~~~~~~~~~

* `tornado.util.import_object` now works with top-level module names that
  do not contain a dot.
* `tornado.util.import_object` now consistently raises `ImportError`
  instead of `AttributeError` when it fails.

`tornado.web`
~~~~~~~~~~~~~

* The ``handlers`` list passed to the `tornado.web.Application`
  constructor and `~tornado.web.Application.add_handlers` methods can now
  contain lists in addition to tuples and `~tornado.web.URLSpec` objects.
* `tornado.web.StaticFileHandler` now works on Windows when the client
  passes an ``If-Modified-Since`` timestamp before 1970.
* New method `.RequestHandler.log_exception` can be overridden to
  customize the logging behavior when an exception is uncaught.  Most
  apps that currently override ``_handle_request_exception`` can now use
  a combination of `.RequestHandler.log_exception` and `.write_error`.
* `.RequestHandler.get_argument` now raises `.MissingArgumentError` (a
  subclass of `tornado.web.HTTPError`, which is what it raised
  previously) if the argument cannot be found.
* `.Application.reverse_url` now uses `.url_escape` with ``plus=False``,
  i.e. spaces are encoded as ``%20`` instead of ``+``.
* Arguments extracted from the url path are now decoded with
  `.url_unescape` with ``plus=False``, so plus signs are left as-is
  instead of being turned into spaces.
* `.RequestHandler.send_error` will now only be called once per request,
  even if multiple exceptions are caught by the stack context.
* The `tornado.web.asynchronous` decorator is no longer necessary for
  methods that return a `.Future` (i.e. those that use the
  `.gen.coroutine` or `.return_future` decorators).
* `.RequestHandler.prepare` may now be asynchronous if it returns a
  `.Future`.  The `~tornado.web.asynchronous` decorator is not used with
  ``prepare``; one of the `.Future`-related decorators should be used
  instead.
* ``RequestHandler.current_user`` may now be assigned to normally.
* `.RequestHandler.redirect` no longer silently strips control characters
  and whitespace.  It is now an error to pass control characters,
  newlines or tabs.
* `.StaticFileHandler` has been reorganized internally and now has
  additional extension points that can be overridden in subclasses.
* `.StaticFileHandler` now supports HTTP ``Range`` requests.
  `.StaticFileHandler` is still not suitable for files too large to
  comfortably fit in memory, but ``Range`` support is necessary in some
  browsers to enable seeking of HTML5 audio and video.
* `.StaticFileHandler` now uses longer hashes by default, and uses the
  same hashes for ``Etag`` as it does for versioned urls.
* `.StaticFileHandler.make_static_url` and `.RequestHandler.static_url`
  now have an additional keyword argument ``include_version`` to suppress
  the url versioning.
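  For example (a hypothetical handler; the application is assumed to have
  a ``static_path`` setting):

  .. code-block:: python

      import tornado.web

      class LogoHandler(tornado.web.RequestHandler):
          def get(self):
              # Without include_version, the returned URL omits the
              # ?v=<hash> query parameter that static_url normally adds.
              self.write(self.static_url("logo.png",
                                         include_version=False))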
* `.StaticFileHandler` now reads its file in chunks, which will reduce
  memory fragmentation.
* Fixed a problem with the ``Date`` header and cookie expiration dates
  when the system locale is set to a non-english configuration.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

* `.WebSocketHandler` now catches `.StreamClosedError` and runs
  `~.WebSocketHandler.on_close` immediately instead of logging a stack
  trace.
* New method `.WebSocketHandler.set_nodelay` can be used to set the
  ``TCP_NODELAY`` flag.

`tornado.wsgi`
~~~~~~~~~~~~~~

* Fixed an exception in `.WSGIContainer` when the connection is closed
  while output is being written.

What's new in Tornado 3.1.1
===========================

Sep 1, 2013
-----------

* `.StaticFileHandler` no longer fails if the client requests a ``Range``
  that is larger than the entire file (Facebook has a crawler that does
  this).
* `.RequestHandler.on_connection_close` now works correctly on subsequent
  requests of a keep-alive connection.

What's new in Tornado 3.2
=========================

Jan 14, 2014
------------

Installation
~~~~~~~~~~~~

* Tornado now depends on the ``backports.ssl_match_hostname`` package
  when running on Python 2.  This will be installed automatically when
  using ``pip`` or ``easy_install``.
* Tornado now includes an optional C extension module, which greatly
  improves performance of websockets.  This extension will be built
  automatically if a C compiler is found at install time.

New modules
~~~~~~~~~~~

* The `tornado.platform.asyncio` module provides integration with the
  ``asyncio`` module introduced in Python 3.4 (also available for Python
  3.3 with ``pip install asyncio``).

`tornado.auth`
~~~~~~~~~~~~~~

* Added `.GoogleOAuth2Mixin`, which supports authentication to Google
  services with OAuth 2 instead of OpenID and OAuth 1.
* `.FacebookGraphMixin` has been updated to use the current Facebook
  login URL, which saves a redirect.

`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

* ``TracebackFuture`` now accepts a ``timeout`` keyword argument
  (although it is still incorrect to use a non-zero timeout in
  non-blocking code).

``tornado.curl_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``tornado.curl_httpclient`` now works on Python 3 with the
  soon-to-be-released pycurl 7.19.3, which will officially support Python
  3 for the first time.  Note that there are some unofficial Python 3
  ports of pycurl (Ubuntu has included one for its past several
  releases); these are not supported for use with Tornado.

`tornado.escape`
~~~~~~~~~~~~~~~~

* `.xhtml_escape` now escapes apostrophes as well.
* `tornado.escape.utf8`, `.to_unicode`, and `.native_str` now raise
  `TypeError` instead of `AssertionError` when given an invalid value.

`tornado.gen`
~~~~~~~~~~~~~

* Coroutines may now yield dicts in addition to lists to wait for
  multiple tasks in parallel.
* Improved performance of `tornado.gen` when yielding a `.Future` that is
  already done.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* `tornado.httpclient.HTTPRequest` now uses property setters so that
  setting attributes after construction applies the same conversions as
  ``__init__`` (e.g. converting the body attribute to bytes).
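A small sketch of the effect (the URL is a placeholder):

.. code-block:: python

    from tornado.httpclient import HTTPRequest

    req = HTTPRequest("http://example.com/submit", method="POST",
                      body="")
    # The property setter applies the same conversion as __init__, so
    # assigning a unicode string stores utf8-encoded bytes.
    req.body = u"pi=\u03c0"
    print(repr(req.body))  # b'pi=\xcf\x80'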
`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* Malformed ``x-www-form-urlencoded`` request bodies will now log a
  warning and continue instead of causing the request to fail (similar to
  the existing handling of malformed ``multipart/form-data`` bodies).
  This is done mainly because some libraries send this content type by
  default even when the data is not form-encoded.
* Fix some error messages for unix sockets (and other non-IP sockets).

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* `.IOLoop` now uses `~.IOLoop.handle_callback_exception` consistently
  for error logging.
* `.IOLoop` now frees callback objects earlier, reducing memory usage
  while idle.
* `.IOLoop` will no longer call `logging.basicConfig` if there is a
  handler defined for the root logger or for the ``tornado`` or
  ``tornado.application`` loggers (previously it only looked at the root
  logger).

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

* `.IOStream` now recognizes ``ECONNABORTED`` error codes in more places
  (which was mainly an issue on Windows).
* `.IOStream` now frees memory earlier if a connection is closed while
  there is data in the write buffer.
* `.PipeIOStream` now handles ``EAGAIN`` error codes correctly.
* `.SSLIOStream` now initiates the SSL handshake automatically without
  waiting for the application to try and read or write to the connection.
* Swallow a spurious exception from ``set_nodelay`` when a connection has
  been reset.

`tornado.locale`
~~~~~~~~~~~~~~~~

* `.Locale.format_date` no longer forces the use of absolute dates in
  Russian.

`tornado.log`
~~~~~~~~~~~~~

* Fix an error from `tornado.log.enable_pretty_logging` when `sys.stderr`
  does not have an ``isatty`` method.
* `tornado.log.LogFormatter` now accepts keyword arguments ``fmt`` and
  ``datefmt``.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

* `.is_valid_ip` (and therefore ``HTTPRequest.remote_ip``) now rejects
  empty strings.
* Synchronously using `.ThreadedResolver` at import time to resolve a
  unicode hostname no longer deadlocks.

`tornado.platform.twisted`
~~~~~~~~~~~~~~~~~~~~~~~~~~

* `.TwistedResolver` now has better error handling.

`tornado.process`
~~~~~~~~~~~~~~~~~

* `.Subprocess` no longer leaks file descriptors if `subprocess.Popen`
  fails.

``tornado.simple_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``simple_httpclient`` now applies the ``connect_timeout`` to requests
  that are queued and have not yet started.
* On Python 2.6, ``simple_httpclient`` now uses TLSv1 instead of SSLv3.
* ``simple_httpclient`` now enforces the connect timeout during DNS
  resolution.
* The embedded ``ca-certificates.crt`` file has been updated with the
  current Mozilla CA list.

`tornado.web`
~~~~~~~~~~~~~

* `.StaticFileHandler` no longer fails if the client requests a ``Range``
  that is larger than the entire file (Facebook has a crawler that does
  this).
* `.RequestHandler.on_connection_close` now works correctly on subsequent
  requests of a keep-alive connection.
* New application setting ``default_handler_class`` can be used to easily
  set up custom 404 pages.
* New application settings ``autoreload``, ``compiled_template_cache``,
  ``static_hash_cache``, and ``serve_traceback`` can be used to control
  individual aspects of debug mode.
* New methods `.RequestHandler.get_query_argument` and
  `.RequestHandler.get_body_argument` and new attributes
  ``HTTPRequest.query_arguments`` and ``HTTPRequest.body_arguments``
  allow access to arguments without intermingling those from the query
  string with those from the request body.
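  For example (a hypothetical handler):

  .. code-block:: python

      import tornado.web

      class SearchHandler(tornado.web.RequestHandler):
          def post(self):
              # Taken only from the URL query string:
              q = self.get_query_argument("q", default="")
              # Taken only from the form-encoded request body:
              page = self.get_body_argument("page", default="1")
              self.write({"q": q, "page": page})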
* `.RequestHandler.decode_argument` and related methods now raise an
  ``HTTPError(400)`` instead of `UnicodeDecodeError` when the argument
  could not be decoded.
* `.RequestHandler.clear_all_cookies` now accepts ``domain`` and ``path``
  arguments, just like `~.RequestHandler.clear_cookie`.
* It is now possible to specify handlers by name when using the
  `tornado.web.URLSpec` class.
* `.Application` now accepts 4-tuples to specify the ``name`` parameter
  (which previously required constructing a `tornado.web.URLSpec` object
  instead of a tuple).
* Fixed an incorrect error message when handler methods return a value
  other than None or a Future.
* Exceptions will no longer be logged twice when using both
  ``@asynchronous`` and ``@gen.coroutine``.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

* `.WebSocketHandler.write_message` now raises `.WebSocketClosedError`
  instead of `AttributeError` when the connection has been closed.
* `.websocket_connect` now accepts preconstructed ``HTTPRequest``
  objects.
* Fix a bug with `.WebSocketHandler` when used with some proxies that
  unconditionally modify the ``Connection`` header.
* `.websocket_connect` now returns an error immediately for refused
  connections instead of waiting for the timeout.
* `.WebSocketClientConnection` now has a ``close`` method.

`tornado.wsgi`
~~~~~~~~~~~~~~

* `.WSGIContainer` now calls the iterable's ``close()`` method even if an
  error is raised, in compliance with the spec.

What's new in Tornado 3.2.1
===========================

May 5, 2014
-----------

Security fixes
~~~~~~~~~~~~~~

* The signed-value format used by `.RequestHandler.set_secure_cookie` and
  `.RequestHandler.get_secure_cookie` has changed to be more secure.
  **This is a disruptive change**.  The ``secure_cookie`` functions take
  new ``version`` parameters to support transitions between cookie
  formats.
* The new cookie format fixes a vulnerability that may be present in
  applications that use multiple cookies where the name of one cookie is
  a prefix of the name of another.
* To minimize disruption, cookies in the older format will be accepted by
  default until they expire.  Applications that may be vulnerable can
  reject all cookies in the older format by passing ``min_version=2`` to
  `.RequestHandler.get_secure_cookie`.
* Thanks to Joost Pol of Certified Secure for reporting this issue.

Backwards-compatibility notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Signed cookies issued by `.RequestHandler.set_secure_cookie` in Tornado
  3.2.1 cannot be read by older releases.  If you need to run 3.2.1 in
  parallel with older releases, you can pass ``version=1`` to
  `.RequestHandler.set_secure_cookie` to issue cookies that are
  backwards-compatible (but have a known weakness, so this option should
  only be used for a transitional period).

Other changes
~~~~~~~~~~~~~

* The C extension used to speed up the websocket module now compiles
  correctly on Windows with MSVC and 64-bit mode.  The fallback to the
  pure-Python alternative now works correctly on Mac OS X machines with
  no C compiler installed.

What's new in Tornado 3.2.2
===========================

June 3, 2014
------------

Security fixes
~~~~~~~~~~~~~~

* The XSRF token is now encoded with a random mask on each request.  This
  makes it safe to include in compressed pages without being vulnerable
  to the BREACH attack.
  This applies to most applications that use both the ``xsrf_cookies``
  and ``gzip`` options (or have gzip applied by a proxy).

Backwards-compatibility notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* If Tornado 3.2.2 is run at the same time as older versions on the same
  domain, there is some potential for issues with the differing cookie
  versions.  The `.Application` setting ``xsrf_cookie_version=1`` can be
  used for a transitional period to generate the older cookie format on
  newer servers.

Other changes
~~~~~~~~~~~~~

* ``tornado.platform.asyncio`` is now compatible with ``trollius``
  version 0.3.

What's new in Tornado 4.0
=========================

July 15, 2014
-------------

Highlights
~~~~~~~~~~

* The `tornado.web.stream_request_body` decorator allows large files to
  be uploaded with limited memory usage; see the sketch after this
  section.
* Coroutines are now faster and are used extensively throughout Tornado
  itself.  More methods now return `Futures <.Future>`, including most
  `.IOStream` methods and `.RequestHandler.flush`.
* Many user-overridden methods are now allowed to return a `.Future` for
  flow control.
* HTTP-related code is now shared between the `tornado.httpserver`,
  ``tornado.simple_httpclient`` and `tornado.wsgi` modules, making
  support for features such as chunked and gzip encoding more consistent.
  `.HTTPServer` now uses new delegate interfaces defined in
  `tornado.httputil` in addition to its old single-callback interface.
* New module `tornado.tcpclient` creates TCP connections with
  non-blocking DNS, SSL handshaking, and support for IPv6.

Backwards-compatibility notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* `tornado.concurrent.Future` is no longer thread-safe; use
  `concurrent.futures.Future` when thread-safety is needed.
* Tornado now depends on the ``certifi`` package instead of bundling its
  own copy of the Mozilla CA list.  This will be installed automatically
  when using ``pip`` or ``easy_install``.
* This version includes the changes to the secure cookie format first
  introduced in version 3.2.1, and the xsrf token change in version
  3.2.2.  If you are upgrading from an earlier version, see those
  versions' release notes.
* WebSocket connections from other origin sites are now rejected by
  default.  To accept cross-origin websocket connections, override the
  new method `.WebSocketHandler.check_origin`.
* `.WebSocketHandler` no longer supports the old ``draft 76`` protocol
  (this mainly affects Safari 5.x browsers).  Applications should use
  non-websocket workarounds for these browsers.
* Authors of alternative `.IOLoop` implementations should see the changes
  to `.IOLoop.add_handler` in this release.
* The ``RequestHandler.async_callback`` and
  ``WebSocketHandler.async_callback`` wrapper functions have been
  removed; they have been obsolete for a long time due to stack contexts
  (and more recently coroutines).
* ``curl_httpclient`` now requires a minimum of libcurl version 7.21.1
  and pycurl 7.18.2.
* Support for ``RequestHandler.get_error_html`` has been removed;
  override `.RequestHandler.write_error` instead.

Other notes
~~~~~~~~~~~

* The git repository has moved to https://github.com/tornadoweb/tornado.
  All old links should be redirected to the new location.
* An announcement mailing list is now available.
* All Tornado modules are now importable on Google App Engine (although
  the App Engine environment does not allow the system calls used by
  `.IOLoop` so many modules are still unusable).
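The streaming-upload support from the highlights looks roughly like this
(a sketch; the handler and counter are hypothetical):

.. code-block:: python

    import tornado.web

    @tornado.web.stream_request_body
    class UploadHandler(tornado.web.RequestHandler):
        def prepare(self):
            self.bytes_read = 0

        def data_received(self, chunk):
            # Called repeatedly as body data arrives, so the upload
            # never has to be buffered in memory all at once.
            self.bytes_read += len(chunk)

        def put(self):
            self.write("received %d bytes" % self.bytes_read)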
`tornado.auth`
~~~~~~~~~~~~~~

* Fixed a bug in ``FacebookMixin`` on Python 3.
* When using the `.Future` interface, exceptions are more reliably
  delivered to the caller.

`tornado.concurrent`
~~~~~~~~~~~~~~~~~~~~

* `tornado.concurrent.Future` is now always thread-unsafe (previously it
  would be thread-safe if the `concurrent.futures` package was
  available).  This improves performance and provides more consistent
  semantics.  The parts of Tornado that accept Futures will accept both
  Tornado's thread-unsafe Futures and the thread-safe
  `concurrent.futures.Future`.
* `tornado.concurrent.Future` now includes all the functionality of the
  old ``TracebackFuture`` class.  ``TracebackFuture`` is now simply an
  alias for ``Future``.

``tornado.curl_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``curl_httpclient`` now passes along the HTTP "reason" string in
  ``response.reason``.

`tornado.gen`
~~~~~~~~~~~~~

* Performance of coroutines has been improved.
* Coroutines no longer generate ``StackContexts`` by default, but they
  will be created on demand when needed.
* The internals of the `tornado.gen` module have been rewritten to
  improve performance when using ``Futures``, at the expense of some
  performance degradation for the older `.YieldPoint` interfaces.
* New function `.with_timeout` wraps a `.Future` and raises an exception
  if it doesn't complete in a given amount of time.
* New object `.moment` can be yielded to allow the IOLoop to run for one
  iteration before resuming.
* `.Task` is now a function returning a `.Future` instead of a
  `.YieldPoint` subclass.  This change should be transparent to
  application code, but allows `.Task` to take advantage of the
  newly-optimized `.Future` handling.

`tornado.http1connection`
~~~~~~~~~~~~~~~~~~~~~~~~~

* New module contains the HTTP implementation shared by
  `tornado.httpserver` and ``tornado.simple_httpclient``.

`tornado.httpclient`
~~~~~~~~~~~~~~~~~~~~

* The command-line HTTP client (``python -m tornado.httpclient $URL``)
  now works on Python 3.
* Fixed a memory leak in `.AsyncHTTPClient` shutdown that affected
  applications that created many HTTP clients and IOLoops.
* New client request parameter ``decompress_response`` replaces the
  existing ``use_gzip`` parameter; both names are accepted.

`tornado.httpserver`
~~~~~~~~~~~~~~~~~~~~

* ``tornado.httpserver.HTTPRequest`` has moved to
  `tornado.httputil.HTTPServerRequest`.
* HTTP implementation has been unified with ``tornado.simple_httpclient``
  in `tornado.http1connection`.
* Now supports ``Transfer-Encoding: chunked`` for request bodies.
* Now supports ``Content-Encoding: gzip`` for request bodies if
  ``decompress_request=True`` is passed to the `.HTTPServer` constructor.
* The ``connection`` attribute of `.HTTPServerRequest` is now documented
  for public use; applications are expected to write their responses via
  the `.HTTPConnection` interface.
* The `.HTTPServerRequest.write` and `.HTTPServerRequest.finish` methods
  are now deprecated.  (`.RequestHandler.write` and
  `.RequestHandler.finish` are *not* deprecated; this only applies to the
  methods on `.HTTPServerRequest`.)
* `.HTTPServer` now supports `.HTTPServerConnectionDelegate` in addition
  to the old ``request_callback`` interface.  The delegate interface
  supports streaming of request bodies.
* `.HTTPServer` now detects the error of an application sending a
  ``Content-Length`` header that is inconsistent with the actual content.
* New constructor arguments ``max_header_size`` and ``max_body_size``
  allow separate limits to be set for different parts of the request.
  ``max_body_size`` is applied even in streaming mode.
* New constructor argument ``chunk_size`` can be used to limit the amount
  of data read into memory at one time per request.
* New constructor arguments ``idle_connection_timeout`` and
  ``body_timeout`` allow time limits to be placed on the reading of
  requests.
* Form-encoded message bodies are now parsed for all HTTP methods, not
  just ``POST``, ``PUT``, and ``PATCH``.

`tornado.httputil`
~~~~~~~~~~~~~~~~~~

* `.HTTPServerRequest` was moved to this module from
  `tornado.httpserver`.
* New base classes `.HTTPConnection`, `.HTTPServerConnectionDelegate`,
  and `.HTTPMessageDelegate` define the interaction between applications
  and the HTTP implementation.

`tornado.ioloop`
~~~~~~~~~~~~~~~~

* `.IOLoop.add_handler` and related methods now accept file-like objects
  in addition to raw file descriptors.  Passing the objects is
  recommended (when possible) to avoid a garbage-collection-related
  problem in unit tests.
* New method `.IOLoop.clear_instance` makes it possible to uninstall the
  singleton instance.
* Timeout scheduling is now more robust against slow callbacks.
* `.IOLoop.add_timeout` is now a bit more efficient.
* When a function run by the `.IOLoop` returns a `.Future` and that
  `.Future` has an exception, the `.IOLoop` will log the exception.
* New method `.IOLoop.spawn_callback` simplifies the process of launching
  a fire-and-forget callback that is separated from the caller's stack
  context.
* New methods `.IOLoop.call_later` and `.IOLoop.call_at` simplify the
  specification of relative or absolute timeouts (as opposed to
  `~.IOLoop.add_timeout`, which used the type of its argument).

`tornado.iostream`
~~~~~~~~~~~~~~~~~~

* The ``callback`` argument to most `.IOStream` methods is now optional.
  When called without a callback the method will return a `.Future` for
  use with coroutines.
* New method `.IOStream.start_tls` converts an `.IOStream` to an
  `.SSLIOStream`.
* No longer gets confused when an ``IOError`` or ``OSError`` without an
  ``errno`` attribute is raised.
* `.BaseIOStream.read_bytes` now accepts a ``partial`` keyword argument,
  which can be used to return before the full amount has been read.  This
  is a more coroutine-friendly alternative to ``streaming_callback``; see
  the sketch after this section.
* `.BaseIOStream.read_until` and ``read_until_regex`` now accept a
  ``max_bytes`` keyword argument which will cause the request to fail if
  it cannot be satisfied from the given number of bytes.
* `.IOStream` no longer reads from the socket into memory if it does not
  need data to satisfy a pending read.  As a side effect, the close
  callback will not be run immediately if the other side closes the
  connection while there is unconsumed data in the buffer.
* The default ``chunk_size`` has been increased to 64KB (from 4KB).
* The `.IOStream` constructor takes a new keyword argument
  ``max_write_buffer_size`` (defaults to unlimited).  Calls to
  `.BaseIOStream.write` will raise `.StreamBufferFullError` if the amount
  of unsent buffered data exceeds this limit.
* ``ETIMEDOUT`` errors are no longer logged.  If you need to distinguish
  timeouts from other forms of closed connections, examine
  ``stream.error`` from a close callback.

`tornado.netutil`
~~~~~~~~~~~~~~~~~

* When `.bind_sockets` chooses a port automatically, it will now use the
  same port for IPv4 and IPv6.
* TLS compression is now disabled by default on Python 3.3 and higher (it
  is not possible to change this option in older versions).
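The ``partial`` read mode mentioned above is the coroutine-friendly
replacement for ``streaming_callback``; a sketch (``stream`` is assumed
to be a connected `.IOStream` and ``handle_chunk`` is a hypothetical
consumer):

.. code-block:: python

    from tornado import gen
    from tornado.iostream import StreamClosedError

    @gen.coroutine
    def pump(stream, handle_chunk):
        try:
            while True:
                # Resolves as soon as any data (up to 64KB) is
                # available instead of waiting for the full amount.
                chunk = yield stream.read_bytes(64 * 1024, partial=True)
                handle_chunk(chunk)
        except StreamClosedError:
            pass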
`tornado.options`
~~~~~~~~~~~~~~~~~

* It is now possible to disable the default logging configuration by
  setting ``options.logging`` to ``None`` instead of the string
  ``"none"``.

`tornado.platform.asyncio`
~~~~~~~~~~~~~~~~~~~~~~~~~~

* Now works on Python 2.6.
* Now works with Trollius version 0.3.

`tornado.platform.twisted`
~~~~~~~~~~~~~~~~~~~~~~~~~~

* `.TwistedIOLoop` now works on Python 3.3+ (with Twisted 14.0.0+).

``tornado.simple_httpclient``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``simple_httpclient`` has better support for IPv6, which is now enabled
  by default.
* Improved default cipher suite selection (Python 2.7+).
* HTTP implementation has been unified with ``tornado.httpserver`` in
  `tornado.http1connection`.
* Streaming request bodies are now supported via the ``body_producer``
  keyword argument to `tornado.httpclient.HTTPRequest`.
* The ``expect_100_continue`` keyword argument to
  `tornado.httpclient.HTTPRequest` allows the use of the HTTP
  ``Expect: 100-continue`` feature.
* ``simple_httpclient`` now raises the original exception (e.g. an
  `IOError`) in more cases, instead of converting everything to
  ``HTTPError``.

`tornado.stack_context`
~~~~~~~~~~~~~~~~~~~~~~~

* The stack context system now has less performance overhead when no
  stack contexts are active.

`tornado.tcpclient`
~~~~~~~~~~~~~~~~~~~

* New module which creates TCP connections and IOStreams, including name
  resolution, connecting, and SSL handshakes.

`tornado.testing`
~~~~~~~~~~~~~~~~~

* `.AsyncTestCase` now attempts to detect test methods that are
  generators but were not run with ``@gen_test`` or any similar decorator
  (this would previously result in the test silently being skipped).
* Better stack traces are now displayed when a test times out.
* The ``@gen_test`` decorator now passes along ``*args, **kwargs`` so it
  can be used on functions with arguments.
* Fixed the test suite when ``unittest2`` is installed on Python 3.

`tornado.web`
~~~~~~~~~~~~~

* It is now possible to support streaming request bodies with the
  `.stream_request_body` decorator and the new
  `.RequestHandler.data_received` method.
* `.RequestHandler.flush` now returns a `.Future` if no callback is
  given.
* New exception `.Finish` may be raised to finish a request without
  triggering error handling.
* When gzip support is enabled, all ``text/*`` mime types will be
  compressed, not just those on a whitelist.
* `.Application` now implements the `.HTTPMessageDelegate` interface.
* ``HEAD`` requests in `.StaticFileHandler` no longer read the entire
  file.
* `.StaticFileHandler` now streams response bodies to the client.
* New setting ``compress_response`` replaces the existing ``gzip``
  setting; both names are accepted.
* XSRF cookies that were not generated by this module (i.e. strings
  without any particular formatting) are once again accepted (as long as
  the cookie and body/header match).  This pattern was common for testing
  and non-browser clients but was broken by the changes in Tornado 3.2.2.

`tornado.websocket`
~~~~~~~~~~~~~~~~~~~

* WebSocket connections from other origin sites are now rejected by
  default.  Browsers do not use the same-origin policy for WebSocket
  connections as they do for most other browser-initiated communications.
  This can be surprising and a security risk, so we disallow these
  connections on the server side by default.  To accept cross-origin
  websocket connections, override the new method
  `.WebSocketHandler.check_origin`.
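  An override that accepts a single trusted site might look like this (a
  sketch; the domain is a placeholder):

  .. code-block:: python

      import tornado.websocket

      class ChatSocketHandler(tornado.websocket.WebSocketHandler):
          def check_origin(self, origin):
              # Return True to accept the cross-origin connection; the
              # default implementation only accepts same-origin
              # requests.
              return origin == "https://example.com"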
* `.WebSocketHandler.close` and `.WebSocketClientConnection.close` now
  support ``code`` and ``reason`` arguments to send a status code and
  message to the other side of the connection when closing.  Both classes
  also have ``close_code`` and ``close_reason`` attributes to receive
  these values when the other side closes.
* The C speedup module now builds correctly with MSVC, and can support
  messages larger than 2GB on 64-bit systems.
* The fallback mechanism for detecting a missing C compiler now works
  correctly on Mac OS X.
* Arguments to `.WebSocketHandler.open` are now decoded in the same way
  as arguments to `.RequestHandler.get` and similar methods.
* It is now allowed to override ``prepare`` in a `.WebSocketHandler`, and
  this method may generate HTTP responses (error pages) in the usual way.
  The HTTP response methods are still not allowed once the WebSocket
  handshake has completed.

`tornado.wsgi`
~~~~~~~~~~~~~~

* New class `.WSGIAdapter` supports running a Tornado `.Application` on a
  WSGI server in a way that is more compatible with Tornado's non-WSGI
  `.HTTPServer`.  `.WSGIApplication` is deprecated in favor of using
  `.WSGIAdapter` with a regular `.Application`.
* `.WSGIAdapter` now supports gzipped output.

What's new in Tornado 4.0.1
===========================

Aug 12, 2014
------------

* The build will now fall back to pure-python mode if the C extension
  fails to build for any reason (previously it would fall back for some
  errors but not others).
* `.IOLoop.call_at` and `.IOLoop.call_later` now always return a timeout
  handle for use with `.IOLoop.remove_timeout`.
* If any callback of a `.PeriodicCallback` or `.IOStream` returns a
  `.Future`, any error raised in that future will now be logged (similar
  to the behavior of `.IOLoop.add_callback`).
* Fixed an exception in client-side websocket connections when the
  connection is closed.
* ``simple_httpclient`` once again correctly handles 204 status codes
  with no content-length header.
* Fixed a regression in ``simple_httpclient`` that would result in
  timeouts for certain kinds of errors.

What's new in Tornado 4.0.2
===========================

Sept 10, 2014
-------------

Bug fixes
~~~~~~~~~

* Fixed a bug that could sometimes cause a timeout to fire after being
  cancelled.
* `.AsyncTestCase` once again passes along arguments to test methods,
  making it compatible with extensions such as Nose's test generators.
* `.StaticFileHandler` can again compress its responses when gzip is
  enabled.
* ``simple_httpclient`` passes its ``max_buffer_size`` argument to the
  underlying stream.
* Fixed a reference cycle that can lead to increased memory consumption.
* `.add_accept_handler` will now limit the number of times it will call
  `~socket.socket.accept` per `.IOLoop` iteration, addressing a potential
  starvation issue.
* Improved error handling in `.IOStream.connect` (primarily for FreeBSD
  systems).

What's new in Tornado 4.1
=========================

Feb 7, 2015
-----------

Highlights
~~~~~~~~~~

* If a `.Future` contains an exception but that exception is never
  examined or re-raised (e.g. by yielding the `.Future`), a stack trace
  will be logged when the `.Future` is garbage-collected.
* New class `tornado.gen.WaitIterator` provides a way to iterate over ``Futures`` in the order they resolve. * The `tornado.websocket` module now supports compression via the "permessage-deflate" extension. Override `.WebSocketHandler.get_compression_options` to enable on the server side, and use the ``compression_options`` keyword argument to `.websocket_connect` on the client side. * When the appropriate packages are installed, it is possible to yield `asyncio.Future` or Twisted ``Deferred`` objects in Tornado coroutines. Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * `.HTTPServer` now calls ``start_request`` with the correct arguments. This change is backwards-incompatible, affecting any application which implemented `.HTTPServerConnectionDelegate` by following the example of `.Application` instead of the documented method signatures. `tornado.concurrent` ~~~~~~~~~~~~~~~~~~~~ * If a `.Future` contains an exception but that exception is never examined or re-raised (e.g. by yielding the `.Future`), a stack trace will be logged when the `.Future` is garbage-collected. * `.Future` now catches and logs exceptions in its callbacks. ``tornado.curl_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``tornado.curl_httpclient`` now supports request bodies for ``PATCH`` and custom methods. * ``tornado.curl_httpclient`` now supports resubmitting bodies after following redirects for methods other than ``POST``. * ``curl_httpclient`` now runs the streaming and header callbacks on the IOLoop. * ``tornado.curl_httpclient`` now uses its own logger for debug output so it can be filtered more easily. `tornado.gen` ~~~~~~~~~~~~~ * New class `tornado.gen.WaitIterator` provides a way to iterate over ``Futures`` in the order they resolve. * When the `~functools.singledispatch` library is available (standard on Python 3.4, available via ``pip install singledispatch`` on older versions), the `.convert_yielded` function can be used to make other kinds of objects yieldable in coroutines. * New function `tornado.gen.sleep` is a coroutine-friendly analogue to `time.sleep`. * `.gen.engine` now correctly captures the stack context for its callbacks. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ * `tornado.httpclient.HTTPRequest` accepts a new argument ``raise_error=False`` to suppress the default behavior of raising an error for non-200 response codes. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ * `.HTTPServer` now calls ``start_request`` with the correct arguments. This change is backwards-incompatible, affecting any application which implemented `.HTTPServerConnectionDelegate` by following the example of `.Application` instead of the documented method signatures. * `.HTTPServer` now tolerates extra newlines which are sometimes inserted between requests on keep-alive connections. * `.HTTPServer` can now use keep-alive connections after a request with a chunked body. * `.HTTPServer` now always reports ``HTTP/1.1`` instead of echoing the request version. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ * New function `tornado.httputil.split_host_and_port` for parsing the ``netloc`` portion of URLs. * The ``context`` argument to `.HTTPServerRequest` is now optional, and if a context is supplied the ``remote_ip`` attribute is also optional. * `.HTTPServerRequest.body` is now always a byte string (previously the default empty body would be a unicode string on python 3). * Header parsing now works correctly when newline-like unicode characters are present. * Header parsing again supports both CRLF and bare LF line separators.
* Malformed ``multipart/form-data`` bodies will always be logged quietly instead of raising an unhandled exception; previously the behavior was inconsistent depending on the exact error. `tornado.ioloop` ~~~~~~~~~~~~~~~~ * The ``kqueue`` and ``select`` IOLoop implementations now report writeability correctly, fixing flow control in IOStream. * When a new `.IOLoop` is created, it automatically becomes "current" for the thread if there is not already a current instance. * New method `.PeriodicCallback.is_running` can be used to see whether the `.PeriodicCallback` has been started. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ * `.IOStream.start_tls` now uses the ``server_hostname`` parameter for certificate validation. * `.SSLIOStream` will no longer consume 100% CPU after certain error conditions. * `.SSLIOStream` no longer logs ``EBADF`` errors during the handshake as they can result from nmap scans in certain modes. `tornado.options` ~~~~~~~~~~~~~~~~~ * `~tornado.options.parse_config_file` now always decodes the config file as utf8 on Python 3. * `tornado.options.define` more accurately finds the module defining the option. ``tornado.platform.asyncio`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * It is now possible to yield ``asyncio.Future`` objects in coroutines when the `~functools.singledispatch` library is available and ``tornado.platform.asyncio`` has been imported. * New methods `tornado.platform.asyncio.to_tornado_future` and `~tornado.platform.asyncio.to_asyncio_future` convert between the two libraries' `.Future` classes. ``tornado.platform.twisted`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * It is now possible to yield ``Deferred`` objects in coroutines when the `~functools.singledispatch` library is available and ``tornado.platform.twisted`` has been imported. `tornado.tcpclient` ~~~~~~~~~~~~~~~~~~~ * `.TCPClient` will no longer raise an exception due to an ill-timed timeout. `tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ * `.TCPServer` no longer ignores its ``read_chunk_size`` argument. `tornado.testing` ~~~~~~~~~~~~~~~~~ * `.AsyncTestCase` has better support for multiple exceptions. Previously it would silently swallow all but the last; now it raises the first and logs all the rest. * `.AsyncTestCase` now cleans up `.Subprocess` state on ``tearDown`` when necessary. `tornado.web` ~~~~~~~~~~~~~ * The `.asynchronous` decorator now understands `concurrent.futures.Future` in addition to `tornado.concurrent.Future`. * `.StaticFileHandler` no longer logs a stack trace if the connection is closed while sending the file. * `.RequestHandler.send_error` now supports a ``reason`` keyword argument, similar to `tornado.web.HTTPError`. * `.RequestHandler.locale` now has a property setter. * `.Application.add_handlers` hostname matching now works correctly with IPv6 literals. * Redirects for the `.Application` ``default_host`` setting now match the request protocol instead of redirecting HTTPS to HTTP. * Malformed ``_xsrf`` cookies are now ignored instead of causing uncaught exceptions. * ``Application.start_request`` now has the same signature as `.HTTPServerConnectionDelegate.start_request`. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * The `tornado.websocket` module now supports compression via the "permessage-deflate" extension. Override `.WebSocketHandler.get_compression_options` to enable on the server side, and use the ``compression_options`` keyword argument to `.websocket_connect` on the client side. * `.WebSocketHandler` no longer logs stack traces when the connection is closed. 
* `.WebSocketHandler.open` now accepts ``*args, **kw`` for consistency with ``RequestHandler.get`` and related methods. * The ``Sec-WebSocket-Version`` header now includes all supported versions. * `.websocket_connect` now has a ``on_message_callback`` keyword argument for callback-style use without ``read_message()``. tornado-4.5.3/docs/releases/v4.2.0.rst000066400000000000000000000211361322420601000172760ustar00rootroot00000000000000What's new in Tornado 4.2 ========================= May 26, 2015 ------------ Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``SSLIOStream.connect`` and `.IOStream.start_tls` now validate certificates by default. * Certificate validation will now use the system CA root certificates instead of ``certifi`` when possible (i.e. Python 2.7.9+ or 3.4+). This includes `.IOStream` and ``simple_httpclient``, but not ``curl_httpclient``. * The default SSL configuration has become stricter, using `ssl.create_default_context` where available on the client side. (On the server side, applications are encouraged to migrate from the ``ssl_options`` dict-based API to pass an `ssl.SSLContext` instead). * The deprecated classes in the `tornado.auth` module, ``GoogleMixin``, ``FacebookMixin``, and ``FriendFeedMixin`` have been removed. New modules: `tornado.locks` and `tornado.queues` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These modules provide classes for coordinating coroutines, merged from `Toro `_. To port your code from Toro's queues to Tornado 4.2, import `.Queue`, `.PriorityQueue`, or `.LifoQueue` from `tornado.queues` instead of from ``toro``. Use `.Queue` instead of Toro's ``JoinableQueue``. In Tornado the methods `~.Queue.join` and `~.Queue.task_done` are available on all queues, not on a special ``JoinableQueue``. Tornado queues raise exceptions specific to Tornado instead of reusing exceptions from the Python standard library. Therefore instead of catching the standard `queue.Empty` exception from `.Queue.get_nowait`, catch the special `tornado.queues.QueueEmpty` exception, and instead of catching the standard `queue.Full` from `.Queue.put_nowait`, catch `tornado.queues.QueueFull`. To port from Toro's locks to Tornado 4.2, import `.Condition`, `.Event`, `.Semaphore`, `.BoundedSemaphore`, or `.Lock` from `tornado.locks` instead of from ``toro``. Toro's ``Semaphore.wait`` allowed a coroutine to wait for the semaphore to be unlocked *without* acquiring it. This encouraged unorthodox patterns; in Tornado, just use `~.Semaphore.acquire`. Toro's ``Event.wait`` raised a ``Timeout`` exception after a timeout. In Tornado, `.Event.wait` raises `tornado.gen.TimeoutError`. Toro's ``Condition.wait`` also raised ``Timeout``, but in Tornado, the `.Future` returned by `.Condition.wait` resolves to False after a timeout::

    @gen.coroutine
    def await_notification():
        if not (yield condition.wait(timeout=timedelta(seconds=1))):
            print('timed out')
        else:
            print('condition is true')

In lock and queue methods, wherever Toro accepted ``deadline`` as a keyword argument, Tornado names the argument ``timeout`` instead. Toro's ``AsyncResult`` is not merged into Tornado, nor its exceptions ``NotReady`` and ``AlreadySet``. Use a `.Future` instead. If you wrote code like this::

    from tornado import gen
    import toro

    result = toro.AsyncResult()

    @gen.coroutine
    def setter():
        result.set(1)

    @gen.coroutine
    def getter():
        value = yield result.get()
        print(value)  # Prints "1".
Then the Tornado equivalent is::

    from tornado import gen
    from tornado.concurrent import Future

    result = Future()

    @gen.coroutine
    def setter():
        result.set_result(1)

    @gen.coroutine
    def getter():
        value = yield result
        print(value)  # Prints "1".

`tornado.autoreload` ~~~~~~~~~~~~~~~~~~~~ * Improved compatibility with Windows. * Fixed a bug in Python 3 if a module was imported during a reload check. `tornado.concurrent` ~~~~~~~~~~~~~~~~~~~~ * `.run_on_executor` now accepts arguments to control which attributes it uses to find the `.IOLoop` and executor. `tornado.curl_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~ * Fixed a bug that would cause the client to stop processing requests if an exception occurred in certain places while there is a queue. `tornado.escape` ~~~~~~~~~~~~~~~~ * `.xhtml_escape` now supports numeric character references in hex format (``&#x20;``) `tornado.gen` ~~~~~~~~~~~~~ * `.WaitIterator` no longer uses weak references, which fixes several garbage-collection-related bugs. * ``tornado.gen.Multi`` and `tornado.gen.multi_future` (which are used when yielding a list or dict in a coroutine) now log any exceptions after the first if more than one `.Future` fails (previously they would be logged when the `.Future` was garbage-collected, but this is more reliable). Both have a new keyword argument ``quiet_exceptions`` to suppress logging of certain exception types; to use this argument you must call ``Multi`` or ``multi_future`` directly instead of simply yielding a list. * `.multi_future` now works when given multiple copies of the same `.Future`. * On Python 3, catching an exception in a coroutine no longer leads to leaks via ``Exception.__context__``. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ * The ``raise_error`` argument now works correctly with the synchronous `.HTTPClient`. * The synchronous `.HTTPClient` no longer interferes with `.IOLoop.current()`. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ * `.HTTPServer` is now a subclass of `tornado.util.Configurable`. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ * `.HTTPHeaders` can now be copied with `copy.copy` and `copy.deepcopy`. `tornado.ioloop` ~~~~~~~~~~~~~~~~ * The `.IOLoop` constructor now has a ``make_current`` keyword argument to control whether the new `.IOLoop` becomes `.IOLoop.current()`. * Third-party implementations of `.IOLoop` should accept ``**kwargs`` in their `~.IOLoop.initialize` methods and pass them to the superclass implementation. * `.PeriodicCallback` is now more efficient when the clock jumps forward by a large amount. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ * ``SSLIOStream.connect`` and `.IOStream.start_tls` now validate certificates by default. * New method `.SSLIOStream.wait_for_handshake` allows server-side applications to wait for the handshake to complete in order to verify client certificates or use NPN/ALPN. * The `.Future` returned by ``SSLIOStream.connect`` now resolves after the handshake is complete instead of as soon as the TCP connection is established. * Reduced logging of SSL errors. * `.BaseIOStream.read_until_close` now works correctly when a ``streaming_callback`` is given but ``callback`` is None (i.e. when it returns a `.Future`) `tornado.locale` ~~~~~~~~~~~~~~~~ * New method `.GettextLocale.pgettext` allows additional context to be supplied for gettext translations. `tornado.log` ~~~~~~~~~~~~~ * `.define_logging_options` now works correctly when given a non-default ``options`` object.
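As an example of the ``quiet_exceptions`` argument described under `tornado.gen` above, an application that does not want connection failures logged can call `.multi_future` directly (a sketch; the ``fetch_all`` helper and the choice of `IOError` are illustrative)::

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient

    @gen.coroutine
    def fetch_all(urls):
        client = AsyncHTTPClient()
        futures = [client.fetch(url) for url in urls]
        # IOErrors after the first failure are suppressed from the logs;
        # the first exception is still raised to the caller.
        responses = yield gen.multi_future(futures, quiet_exceptions=IOError)
        raise gen.Return(responses)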
`tornado.process` ~~~~~~~~~~~~~~~~~ * New method `.Subprocess.wait_for_exit` is a coroutine-friendly version of `.Subprocess.set_exit_callback`. `tornado.simple_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Improved performance on Python 3 by reusing a single `ssl.SSLContext`. * New constructor argument ``max_body_size`` controls the maximum response size the client is willing to accept. It may be bigger than ``max_buffer_size`` if ``streaming_callback`` is used. `tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ * `.TCPServer.handle_stream` may be a coroutine (so that any exceptions it raises will be logged). `tornado.util` ~~~~~~~~~~~~~~ * `.import_object` now supports unicode strings on Python 2. * `.Configurable.initialize` now supports positional arguments. `tornado.web` ~~~~~~~~~~~~~ * Key versioning support for cookie signing. ``cookie_secret`` application setting can now contain a dict of valid keys with version as key. The current signing key then must be specified via ``key_version`` setting. * Parsing of the ``If-None-Match`` header now follows the RFC and supports weak validators. * Passing ``secure=False`` or ``httponly=False`` to `.RequestHandler.set_cookie` now works as expected (previously only the presence of the argument was considered and its value was ignored). * `.RequestHandler.get_arguments` now requires that its ``strip`` argument be of type bool. This helps prevent errors caused by the slightly dissimilar interfaces between the singular and plural methods. * Errors raised in ``_handle_request_exception`` are now logged more reliably. * `.RequestHandler.redirect` now works correctly when called from a handler whose path begins with two slashes. * Passing messages containing ``%`` characters to `tornado.web.HTTPError` no longer causes broken error messages. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * The ``on_close`` method will no longer be called more than once. * When the other side closes a connection, we now echo the received close code back instead of sending an empty close frame. tornado-4.5.3/docs/releases/v4.2.1.rst000066400000000000000000000005051322420601000172740ustar00rootroot00000000000000What's new in Tornado 4.2.1 =========================== Jul 17, 2015 ------------ Security fix ~~~~~~~~~~~~ * This release fixes a path traversal vulnerability in `.StaticFileHandler`, in which files whose names *started with* the ``static_path`` directory but were not actually *in* that directory could be accessed. tornado-4.5.3/docs/releases/v4.3.0.rst000066400000000000000000000155071322420601000173040ustar00rootroot00000000000000What's new in Tornado 4.3 ========================= Nov 6, 2015 ----------- Highlights ~~~~~~~~~~ * The new async/await keywords in Python 3.5 are supported. In most cases, ``async def`` can be used in place of the ``@gen.coroutine`` decorator. Inside a function defined with ``async def``, use ``await`` instead of ``yield`` to wait on an asynchronous operation. Coroutines defined with async/await will be faster than those defined with ``@gen.coroutine`` and ``yield``, but do not support some features including `.Callback`/`.Wait` or the ability to yield a Twisted ``Deferred``. See :ref:`the users' guide ` for more. * The async/await keywords are also available when compiling with Cython in older versions of Python. Deprecation notice ~~~~~~~~~~~~~~~~~~ * This will be the last release of Tornado to support Python 2.6 or 3.2. Note that PyPy3 will continue to be supported even though it implements a mix of Python 3.2 and 3.3 features. 
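As an illustration of the async/await support described in the highlights above, a coroutine written today with the decorator, such as this sketch (``http_client`` is assumed to be an `.AsyncHTTPClient` instance)::

    @gen.coroutine
    def fetch_body(url):
        response = yield http_client.fetch(url)
        raise gen.Return(response.body)

can be written on Python 3.5 with the new keywords as::

    async def fetch_body(url):
        response = await http_client.fetch(url)
        return response.body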
Installation ~~~~~~~~~~~~ * Tornado has several new dependencies: ``ordereddict`` on Python 2.6, ``singledispatch`` on all Python versions prior to 3.4 (This was an optional dependency in prior versions of Tornado, and is now mandatory), and ``backports_abc>=0.4`` on all versions prior to 3.5. These dependencies will be installed automatically when installing with ``pip`` or ``setup.py install``. These dependencies will not be required when running on Google App Engine. * Binary wheels are provided for Python 3.5 on Windows (32 and 64 bit). `tornado.auth` ~~~~~~~~~~~~~~ * New method `.OAuth2Mixin.oauth2_request` can be used to make authenticated requests with an access token. * Now compatible with callbacks that have been compiled with Cython. `tornado.autoreload` ~~~~~~~~~~~~~~~~~~~~ * Fixed an issue with the autoreload command-line wrapper in which imports would be incorrectly interpreted as relative. `tornado.curl_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~ * Fixed parsing of multi-line headers. * ``allow_nonstandard_methods=True`` now bypasses body sanity checks, in the same way as in ``simple_httpclient``. * The ``PATCH`` method now allows a body without ``allow_nonstandard_methods=True``. `tornado.gen` ~~~~~~~~~~~~~ * `.WaitIterator` now supports the ``async for`` statement on Python 3.5. * ``@gen.coroutine`` can be applied to functions compiled with Cython. On python versions prior to 3.5, the ``backports_abc`` package must be installed for this functionality. * ``Multi`` and `.multi_future` are deprecated and replaced by a unified function `.multi`. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ * `tornado.httpclient.HTTPError` is now copyable with the `copy` module. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ * Requests containing both ``Content-Length`` and ``Transfer-Encoding`` will be treated as an error. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ * `.HTTPHeaders` can now be pickled and unpickled. `tornado.ioloop` ~~~~~~~~~~~~~~~~ * ``IOLoop(make_current=True)`` now works as intended instead of raising an exception. * The Twisted and asyncio IOLoop implementations now clear ``current()`` when they exit, like the standard IOLoops. * `.IOLoop.add_callback` is faster in the single-threaded case. * `.IOLoop.add_callback` no longer raises an error when called on a closed IOLoop, but the callback will not be invoked. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ * Coroutine-style usage of `.IOStream` now converts most errors into `.StreamClosedError`, which has the effect of reducing log noise from exceptions that are outside the application's control (especially SSL errors). * `.StreamClosedError` now has a ``real_error`` attribute which indicates why the stream was closed. It is the same as the ``error`` attribute of `.IOStream` but may be more easily accessible than the `.IOStream` itself. * Improved error handling in `~.BaseIOStream.read_until_close`. * Logging is less noisy when an SSL server is port scanned. * ``EINTR`` is now handled on all reads. `tornado.locale` ~~~~~~~~~~~~~~~~ * `tornado.locale.load_translations` now accepts encodings other than UTF-8. UTF-16 and UTF-8 will be detected automatically if a BOM is present; for other encodings `.load_translations` has an ``encoding`` parameter. `tornado.locks` ~~~~~~~~~~~~~~~ * `.Lock` and `.Semaphore` now support the ``async with`` statement on Python 3.5. `tornado.log` ~~~~~~~~~~~~~ * A new time-based log rotation mode is available with ``--log_rotate_mode=time``, ``--log-rotate-when``, and ``log-rotate-interval``. 
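For example, daily time-based rotation could be enabled from the command line of an application that calls `tornado.options.parse_command_line` (a sketch; ``myapp.py`` and the log location are placeholders, and the ``D`` value follows `logging.handlers.TimedRotatingFileHandler` conventions)::

    python myapp.py --log_file_prefix=/var/log/myapp.log \
        --log_rotate_mode=time --log-rotate-when=D --log-rotate-interval=1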
`tornado.netutil` ~~~~~~~~~~~~~~~~~ * `.bind_sockets` now supports ``SO_REUSEPORT`` with the ``reuse_port=True`` argument. `tornado.options` ~~~~~~~~~~~~~~~~~ * Dashes and underscores are now fully interchangeable in option names. `tornado.queues` ~~~~~~~~~~~~~~~~ * `.Queue` now supports the ``async for`` statement on Python 3.5. `tornado.simple_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * When following redirects, ``streaming_callback`` and ``header_callback`` will no longer be run on the redirect responses (only the final non-redirect). * Responses containing both ``Content-Length`` and ``Transfer-Encoding`` will be treated as an error. `tornado.template` ~~~~~~~~~~~~~~~~~~ * `tornado.template.ParseError` now includes the filename in addition to line number. * Whitespace handling has become more configurable. The `.Loader` constructor now has a ``whitespace`` argument, there is a new ``template_whitespace`` `.Application` setting, and there is a new ``{% whitespace %}`` template directive. All of these options take a mode name defined in the `tornado.template.filter_whitespace` function. The default mode is ``single``, which is the same behavior as prior versions of Tornado. * Non-ASCII filenames are now supported. `tornado.testing` ~~~~~~~~~~~~~~~~~ * `.ExpectLog` objects now have a boolean ``logged_stack`` attribute to make it easier to test whether an exception stack trace was logged. `tornado.web` ~~~~~~~~~~~~~ * The hard limit of 4000 bytes per outgoing header has been removed. * `.StaticFileHandler` returns the correct ``Content-Type`` for files with ``.gz``, ``.bz2``, and ``.xz`` extensions. * Responses smaller than 1000 bytes will no longer be compressed. * The default gzip compression level is now 6 (was 9). * Fixed a regression in Tornado 4.2.1 that broke `.StaticFileHandler` with a ``path`` of ``/``. * `tornado.web.HTTPError` is now copyable with the `copy` module. * The exception `.Finish` now accepts an argument which will be passed to the method `.RequestHandler.finish`. * New `.Application` setting ``xsrf_cookie_kwargs`` can be used to set additional attributes such as ``secure`` or ``httponly`` on the XSRF cookie. * `.Application.listen` now returns the `.HTTPServer` it created. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * Fixed handling of continuation frames when compression is enabled. tornado-4.5.3/docs/releases/v4.4.0.rst000066400000000000000000000051151322420601000172770ustar00rootroot00000000000000What's new in Tornado 4.4 ========================= Jul 15, 2016 ------------ General ~~~~~~~ * Tornado now requires Python 2.7 or 3.3+; versions 2.6 and 3.2 are no longer supported. Pypy3 is still supported even though its latest release is mainly based on Python 3.2. * The `monotonic `_ package is now supported as an alternative to `Monotime `_ for monotonic clock support on Python 2. ``tornado.curl_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Failures in ``_curl_setup_request`` no longer cause the ``max_clients`` pool to be exhausted. * Non-ascii header values are now handled correctly. `tornado.gen` ~~~~~~~~~~~~~ * `.with_timeout` now accepts any yieldable object (except `.YieldPoint`), not just `tornado.concurrent.Future`. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ * The errors raised by timeouts now indicate what state the request was in; the error message is no longer simply "599 Timeout". * Calling `repr` on a `tornado.httpclient.HTTPError` no longer raises an error. 
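As an example of the broadened `.with_timeout` described under `tornado.gen` above, any yieldable object can now be wrapped with a deadline (a sketch; the five-second deadline and ``fetch_with_deadline`` helper are illustrative)::

    from datetime import timedelta
    from tornado import gen

    @gen.coroutine
    def fetch_with_deadline(client, url):
        # Raises tornado.gen.TimeoutError if the fetch does not
        # complete within five seconds.
        response = yield gen.with_timeout(timedelta(seconds=5),
                                          client.fetch(url))
        raise gen.Return(response)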
`tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ * Int-like enums (including `http.HTTPStatus`) can now be used as status codes. * Responses with status code ``204 No Content`` no longer emit a ``Content-Length: 0`` header. `tornado.ioloop` ~~~~~~~~~~~~~~~~ * Improved performance when there are large numbers of active timeouts. `tornado.netutil` ~~~~~~~~~~~~~~~~~ * All included `.Resolver` implementations raise `IOError` (or a subclass) for any resolution failure. `tornado.options` ~~~~~~~~~~~~~~~~~ * Options can now be modified with subscript syntax in addition to attribute syntax. * The special variable ``__file__`` is now available inside config files. ``tornado.simple_httpclient`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * HTTP/1.0 (not 1.1) responses without a ``Content-Length`` header now work correctly. `tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ * `.TCPServer.bind` now accepts a ``reuse_port`` argument. `tornado.testing` ~~~~~~~~~~~~~~~~~ * Test sockets now always use ``127.0.0.1`` instead of ``localhost``. This avoids conflicts when the automatically-assigned port is available on IPv4 but not IPv6, or in unusual network configurations when ``localhost`` has multiple IP addresses. `tornado.web` ~~~~~~~~~~~~~ * ``image/svg+xml`` is now on the list of compressible mime types. * Fixed an error on Python 3 when compression is used with multiple ``Vary`` headers. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ * ``WebSocketHandler.__init__`` now uses `super`, which improves support for multiple inheritance. tornado-4.5.3/docs/releases/v4.4.1.rst000066400000000000000000000003701322420601000172760ustar00rootroot00000000000000What's new in Tornado 4.4.1 =========================== Jul 23, 2016 ------------ `tornado.web` ~~~~~~~~~~~~~ * Fixed a regression in Tornado 4.4 which caused URL regexes containing backslash escapes outside capturing groups to be rejected. tornado-4.5.3/docs/releases/v4.4.2.rst000066400000000000000000000013631322420601000173020ustar00rootroot00000000000000What's new in Tornado 4.4.2 =========================== Oct 1, 2016 ------------ Security fixes ~~~~~~~~~~~~~~ * A difference in cookie parsing between Tornado and web browsers (especially when combined with Google Analytics) could allow an attacker to set arbitrary cookies and bypass XSRF protection. The cookie parser has been rewritten to fix this attack. Backwards-compatibility notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Cookies containing certain special characters (in particular semicolon and square brackets) are now parsed differently. * If the cookie header contains a combination of valid and invalid cookies, the valid ones will be returned (older versions of Tornado would reject the entire header for a single invalid cookie). tornado-4.5.3/docs/releases/v4.4.3.rst000066400000000000000000000004241322420601000173000ustar00rootroot00000000000000What's new in Tornado 4.4.3 =========================== Mar 30, 2017 ------------ Bug fixes ~~~~~~~~~ * The `tornado.auth` module has been updated for compatibility with `a change to Facebook's access_token endpoint. `_ tornado-4.5.3/docs/releases/v4.5.0.rst000066400000000000000000000157521322420601000173100ustar00rootroot00000000000000What's new in Tornado 4.5 ========================= Apr 16, 2017 ------------ Backwards-compatibility warning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The `tornado.websocket` module now imposes a limit on the size of incoming messages, which defaults to 10MiB. New module ~~~~~~~~~~ - `tornado.routing` provides a more flexible routing system than the one built in to `.Application`. 
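For example, a top-level `.RuleRouter` can dispatch to multiple `.Application` instances by path prefix (a sketch; the handler and paths are placeholders)::

    from tornado import ioloop, web
    from tornado.httpserver import HTTPServer
    from tornado.routing import PathMatches, Rule, RuleRouter

    class Hello(web.RequestHandler):
        def get(self):
            self.write("hello")

    app1 = web.Application([(r"/app1/hello", Hello)])
    app2 = web.Application([(r"/app2/hello", Hello)])

    # Requests are routed to an entire Application based on the path.
    router = RuleRouter([
        Rule(PathMatches("/app1.*"), app1),
        Rule(PathMatches("/app2.*"), app2),
    ])

    server = HTTPServer(router)
    server.listen(8888)
    ioloop.IOLoop.current().start()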
General changes ~~~~~~~~~~~~~~~ - Reduced the number of circular references, reducing memory usage and improving performance. `tornado.auth` ~~~~~~~~~~~~~~ * The `tornado.auth` module has been updated for compatibility with `a change to Facebook's access_token endpoint `_. This includes both the changes initially released in Tornado 4.4.3 and an additional change to support the ``session_expires`` field in the new format. The ``session_expires`` field is currently a string; it should be accessed as ``int(user['session_expires'])`` because it will change from a string to an int in Tornado 5.0. `tornado.autoreload` ~~~~~~~~~~~~~~~~~~~~ - Autoreload is now compatible with the `asyncio` event loop. - Autoreload no longer attempts to close the `.IOLoop` and all registered file descriptors before restarting; it relies on the ``CLOEXEC`` flag being set instead. `tornado.concurrent` ~~~~~~~~~~~~~~~~~~~~ - Suppressed some "'NoneType' object not callable" messages that could be logged at shutdown. `tornado.gen` ~~~~~~~~~~~~~ - ``yield None`` is now equivalent to ``yield gen.moment``. `~tornado.gen.moment` is deprecated. This improves compatibility with `asyncio`. - Fixed an issue in which a generator object could be garbage collected prematurely (most often when weak references are used). - New function `.is_coroutine_function` identifies functions wrapped by `.coroutine` or `.engine`. ``tornado.http1connection`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``Transfer-Encoding`` header is now parsed case-insensitively. `tornado.httpclient` ~~~~~~~~~~~~~~~~~~~~ - ``SimpleAsyncHTTPClient`` now follows 308 redirects. - ``CurlAsyncHTTPClient`` will no longer accept protocols other than ``http`` and ``https``. To override this, set ``pycurl.PROTOCOLS`` and ``pycurl.REDIR_PROTOCOLS`` in a ``prepare_curl_callback``. - ``CurlAsyncHTTPClient`` now supports digest authentication for proxies (in addition to basic auth) via the new ``proxy_auth_mode`` argument. - The minimum supported version of ``libcurl`` is now ``7.22.0``. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ - `.HTTPServer` now accepts the keyword argument ``trusted_downstream`` which controls the parsing of ``X-Forwarded-For`` headers. This header may be a list or set of IP addresses of trusted proxies which will be skipped in the ``X-Forwarded-For`` list. - The ``no_keep_alive`` argument works again. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ - `.url_concat` correctly handles fragments and existing query arguments. `tornado.ioloop` ~~~~~~~~~~~~~~~~ - Fixed 100% CPU usage after a callback returns an empty list or dict. - `.IOLoop.add_callback` now uses a lockless implementation which makes it safe for use from ``__del__`` methods. This improves performance of calls to `~.IOLoop.add_callback` from the `.IOLoop` thread, and slightly decreases it for calls from other threads. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ - `memoryview` objects are now permitted as arguments to `~.BaseIOStream.write`. - The internal memory buffers used by `.IOStream` now use `bytearray` instead of a list of `bytes`, improving performance. - Futures returned by `~.BaseIOStream.write` are no longer orphaned if a second call to ``write`` occurs before the previous one is finished. `tornado.log` ~~~~~~~~~~~~~ - Colored log output is now supported on Windows if the `colorama `_ library is installed and the application calls ``colorama.init()`` at startup. - The signature of the `.LogFormatter` constructor has been changed to make it compatible with `logging.config.dictConfig`.
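As an example of the ``trusted_downstream`` argument described under `tornado.httpserver` above (a sketch; ``app``, the proxy address, and the port are placeholders)::

    from tornado.httpserver import HTTPServer

    # 10.0.0.1 is a trusted load balancer; it is skipped when computing
    # request.remote_ip from the X-Forwarded-For header.
    server = HTTPServer(app, xheaders=True,
                        trusted_downstream=["10.0.0.1"])
    server.listen(8888)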
`tornado.netutil` ~~~~~~~~~~~~~~~~~ - Worked around an issue that caused "LookupError: unknown encoding: latin1" errors on Solaris. `tornado.process` ~~~~~~~~~~~~~~~~~ - `.Subprocess` no longer causes "subprocess still running" warnings on Python 3.6. - Improved error handling in `.cpu_count`. `tornado.tcpclient` ~~~~~~~~~~~~~~~~~~~ - `.TCPClient` now supports a ``source_ip`` and ``source_port`` argument. - Improved error handling for environments where IPv6 support is incomplete. `tornado.tcpserver` ~~~~~~~~~~~~~~~~~~~ - `.TCPServer.handle_stream` implementations may now be native coroutines. - Stopping a `.TCPServer` twice no longer raises an exception. `tornado.web` ~~~~~~~~~~~~~ - `.RedirectHandler` now supports substituting parts of the matched URL into the redirect location using `str.format` syntax. - New methods `.RequestHandler.render_linked_js`, `.RequestHandler.render_embed_js`, `.RequestHandler.render_linked_css`, and `.RequestHandler.render_embed_css` can be overridden to customize the output of `.UIModule`. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ - `.WebSocketHandler.on_message` implementations may now be coroutines. New messages will not be processed until the previous ``on_message`` coroutine has finished. - The ``websocket_ping_interval`` and ``websocket_ping_timeout`` application settings can now be used to enable a periodic ping of the websocket connection, allowing dropped connections to be detected and closed. - The new ``websocket_max_message_size`` setting defaults to 10MiB. The connection will be closed if messages larger than this are received. - Headers set by `.RequestHandler.prepare` or `.RequestHandler.set_default_headers` are now sent as a part of the websocket handshake. - Return values from `.WebSocketHandler.get_compression_options` may now include the keys ``compression_level`` and ``mem_level`` to set gzip parameters. The default compression level is now 6 instead of 9. Demos ~~~~~ - A new file upload demo is available in the `file_upload `_ directory. - A new `.TCPClient` and `.TCPServer` demo is available in the `tcpecho `_ directory. - Minor updates have been made to several existing demos, including updates to more recent versions of jquery. Credits ~~~~~~~ The following people contributed commits to this release: - A\. Jesse Jiryu Davis - Aaron Opfer - Akihiro Yamazaki - Alexander - Andreas Røsdal - Andrew Rabert - Andrew Sumin - Antoine Pietri - Antoine Pitrou - Artur Stawiarski - Ben Darnell - Brian Mego - Dario - Doug Vargas - Eugene Dubovoy - Iver Jordal - JZQT - James Maier - Jeff Hunter - Leynos - Mark Henderson - Michael V. DePalatis - Min RK - Mircea Ulinic - Ping - Ping Yang - Riccardo Magliocchetti - Samuel Chen - Samuel Dion-Girardeau - Scott Meisburger - Shawn Ding - TaoBeier - Thomas Kluyver - Vadim Semenov - matee - mike820324 - stiletto - zhimin - 依云 tornado-4.5.3/docs/releases/v4.5.1.rst000066400000000000000000000004351322420601000173010ustar00rootroot00000000000000What's new in Tornado 4.5.1 =========================== Apr 20, 2017 ------------ `tornado.log` ~~~~~~~~~~~~~ - Improved detection of libraries for colorized logging. `tornado.httputil` ~~~~~~~~~~~~~~~~~~ - `.url_concat` once again treats None as equivalent to an empty sequence.
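The restored `.url_concat` behavior, together with the fragment handling added in 4.5.0, can be seen in a doctest-style session (the URLs are placeholders)::

    >>> from tornado.httputil import url_concat
    >>> url_concat("http://example.com/page", None)
    'http://example.com/page'
    >>> url_concat("http://example.com/page?a=1#frag", dict(b="2"))
    'http://example.com/page?a=1&b=2#frag'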
tornado-4.5.3/docs/releases/v4.5.2.rst000066400000000000000000000005031322420601000172760ustar00rootroot00000000000000What's new in Tornado 4.5.2 =========================== Aug 27, 2017 ------------ Bug Fixes ~~~~~~~~~ - Tornado now sets the ``FD_CLOEXEC`` flag on all file descriptors it creates. This prevents hanging client connections and resource leaks when the `tornado.autoreload` module (or ``Application(debug=True)``) is used. tornado-4.5.3/docs/releases/v4.5.3.rst000066400000000000000000000022451322420601000173040ustar00rootroot00000000000000What's new in Tornado 4.5.3 =========================== Jan 6, 2018 ------------ `tornado.curl_httpclient` ~~~~~~~~~~~~~~~~~~~~~~~~~ - Improved debug logging on Python 3. `tornado.httpserver` ~~~~~~~~~~~~~~~~~~~~ - ``Content-Length`` and ``Transfer-Encoding`` headers are no longer sent with 1xx or 204 responses (this was already true of 304 responses). - Reading chunked requests no longer leaves the connection in a broken state. `tornado.iostream` ~~~~~~~~~~~~~~~~~~ - Writing a `memoryview` can no longer result in "BufferError: Existing exports of data: object cannot be re-sized". `tornado.options` ~~~~~~~~~~~~~~~~~ - Duplicate option names are now detected properly whether they use hyphens or underscores. `tornado.testing` ~~~~~~~~~~~~~~~~~ - `.AsyncHTTPTestCase.fetch` now uses ``127.0.0.1`` instead of ``localhost``, improving compatibility with systems that have partially-working ipv6 stacks. `tornado.web` ~~~~~~~~~~~~~ - It is no longer allowed to send a body with 1xx or 204 responses. `tornado.websocket` ~~~~~~~~~~~~~~~~~~~ - Requests with invalid websocket headers now get a response with status code 400 instead of a closed connection. tornado-4.5.3/docs/requirements.txt000066400000000000000000000000101322420601000173620ustar00rootroot00000000000000Twisted tornado-4.5.3/docs/routing.rst000066400000000000000000000002301322420601000163230ustar00rootroot00000000000000``tornado.routing`` --- Basic routing implementation ==================================================== .. automodule:: tornado.routing :members: tornado-4.5.3/docs/stack_context.rst000066400000000000000000000003221322420601000175070ustar00rootroot00000000000000``tornado.stack_context`` --- Exception handling across asynchronous callbacks ============================================================================== .. automodule:: tornado.stack_context :members: tornado-4.5.3/docs/tcpclient.rst000066400000000000000000000002421322420601000166240ustar00rootroot00000000000000``tornado.tcpclient`` --- `.IOStream` connection factory ======================================================== .. automodule:: tornado.tcpclient :members: tornado-4.5.3/docs/tcpserver.rst000066400000000000000000000002531322420601000166560ustar00rootroot00000000000000``tornado.tcpserver`` --- Basic `.IOStream`-based TCP server ============================================================ .. automodule:: tornado.tcpserver :members: tornado-4.5.3/docs/template.rst000066400000000000000000000010421322420601000164510ustar00rootroot00000000000000``tornado.template`` --- Flexible output generation =================================================== .. automodule:: tornado.template Class reference --------------- .. autoclass:: Template(template_string, name="", loader=None, compress_whitespace=None, autoescape="xhtml_escape", whitespace=None) :members: .. autoclass:: BaseLoader :members: .. autoclass:: Loader :members: .. autoclass:: DictLoader :members: .. autoexception:: ParseError .. 
autofunction:: filter_whitespace tornado-4.5.3/docs/testing.rst000066400000000000000000000014131322420601000163150ustar00rootroot00000000000000``tornado.testing`` --- Unit testing support for asynchronous code ================================================================== .. automodule:: tornado.testing Asynchronous test cases ----------------------- .. autoclass:: AsyncTestCase :members: .. autoclass:: AsyncHTTPTestCase :members: .. autoclass:: AsyncHTTPSTestCase :members: .. autofunction:: gen_test Controlling log output ---------------------- .. autoclass:: ExpectLog :members: .. autoclass:: LogTrapTestCase :members: Test runner ----------- .. autofunction:: main Helper functions ---------------- .. autofunction:: bind_unused_port .. autofunction:: get_unused_port .. autofunction:: get_async_test_timeout tornado-4.5.3/docs/tornado.png000066400000000000000000000156751322420601000163000ustar00rootroot00000000000000[binary PNG image data omitted] tornado-4.5.3/docs/twisted.rst000066400000000000000000000007671322420601000163340ustar00rootroot00000000000000``tornado.platform.twisted`` --- Bridges between Twisted and Tornado ======================================================================== .. automodule:: tornado.platform.twisted Twisted on Tornado ------------------ .. autoclass:: TornadoReactor :members: .. autofunction:: install Tornado on Twisted ------------------ .. autoclass:: TwistedIOLoop :members: Twisted DNS resolver -------------------- .. autoclass:: TwistedResolver :members: tornado-4.5.3/docs/util.rst000066400000000000000000000002711322420601000156160ustar00rootroot00000000000000``tornado.util`` --- General-purpose utilities ============================================== .. testsetup:: from tornado.util import * .. automodule:: tornado.util :members: tornado-4.5.3/docs/utilities.rst000066400000000000000000000001471322420601000166560ustar00rootroot00000000000000Utilities ========= ..
toctree:: autoreload log options stack_context testing util tornado-4.5.3/docs/web.rst000066400000000000000000000310451322420601000154210ustar00rootroot00000000000000``tornado.web`` --- ``RequestHandler`` and ``Application`` classes ================================================================== .. testsetup:: from tornado.web import * .. automodule:: tornado.web Request handlers ---------------- .. autoclass:: RequestHandler Entry points ^^^^^^^^^^^^ .. automethod:: RequestHandler.initialize .. automethod:: RequestHandler.prepare .. automethod:: RequestHandler.on_finish .. _verbs: Implement any of the following methods (collectively known as the HTTP verb methods) to handle the corresponding HTTP method. These methods can be made asynchronous with one of the following decorators: `.gen.coroutine`, `.return_future`, or `asynchronous`. The arguments to these methods come from the `.URLSpec`: Any capturing groups in the regular expression become arguments to the HTTP verb methods (keyword arguments if the group is named, positional arguments if it's unnamed). To support a method not on this list, override the class variable ``SUPPORTED_METHODS``::

    class WebDAVHandler(RequestHandler):
        SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('PROPFIND',)

        def propfind(self):
            pass

.. automethod:: RequestHandler.get .. automethod:: RequestHandler.head .. automethod:: RequestHandler.post .. automethod:: RequestHandler.delete .. automethod:: RequestHandler.patch .. automethod:: RequestHandler.put .. automethod:: RequestHandler.options Input ^^^^^ .. automethod:: RequestHandler.get_argument .. automethod:: RequestHandler.get_arguments .. automethod:: RequestHandler.get_query_argument .. automethod:: RequestHandler.get_query_arguments .. automethod:: RequestHandler.get_body_argument .. automethod:: RequestHandler.get_body_arguments .. automethod:: RequestHandler.decode_argument .. attribute:: RequestHandler.request The `tornado.httputil.HTTPServerRequest` object containing additional request parameters including e.g. headers and body data. .. attribute:: RequestHandler.path_args .. attribute:: RequestHandler.path_kwargs The ``path_args`` and ``path_kwargs`` attributes contain the positional and keyword arguments that are passed to the :ref:`HTTP verb methods `. These attributes are set before those methods are called, so the values are available during `prepare`. .. automethod:: RequestHandler.data_received Output ^^^^^^ .. automethod:: RequestHandler.set_status .. automethod:: RequestHandler.set_header .. automethod:: RequestHandler.add_header .. automethod:: RequestHandler.clear_header .. automethod:: RequestHandler.set_default_headers .. automethod:: RequestHandler.write .. automethod:: RequestHandler.flush .. automethod:: RequestHandler.finish .. automethod:: RequestHandler.render .. automethod:: RequestHandler.render_string .. automethod:: RequestHandler.get_template_namespace .. automethod:: RequestHandler.redirect .. automethod:: RequestHandler.send_error .. automethod:: RequestHandler.write_error .. automethod:: RequestHandler.clear .. automethod:: RequestHandler.render_linked_js .. automethod:: RequestHandler.render_embed_js .. automethod:: RequestHandler.render_linked_css .. automethod:: RequestHandler.render_embed_css Cookies ^^^^^^^ .. autoattribute:: RequestHandler.cookies .. automethod:: RequestHandler.get_cookie .. automethod:: RequestHandler.set_cookie .. automethod:: RequestHandler.clear_cookie .. automethod:: RequestHandler.clear_all_cookies ..
automethod:: RequestHandler.get_secure_cookie .. automethod:: RequestHandler.get_secure_cookie_key_version .. automethod:: RequestHandler.set_secure_cookie .. automethod:: RequestHandler.create_signed_value .. autodata:: MIN_SUPPORTED_SIGNED_VALUE_VERSION .. autodata:: MAX_SUPPORTED_SIGNED_VALUE_VERSION .. autodata:: DEFAULT_SIGNED_VALUE_VERSION .. autodata:: DEFAULT_SIGNED_VALUE_MIN_VERSION Other ^^^^^ .. attribute:: RequestHandler.application The `Application` object serving this request .. automethod:: RequestHandler.check_etag_header .. automethod:: RequestHandler.check_xsrf_cookie .. automethod:: RequestHandler.compute_etag .. automethod:: RequestHandler.create_template_loader .. autoattribute:: RequestHandler.current_user .. automethod:: RequestHandler.get_browser_locale .. automethod:: RequestHandler.get_current_user .. automethod:: RequestHandler.get_login_url .. automethod:: RequestHandler.get_status .. automethod:: RequestHandler.get_template_path .. automethod:: RequestHandler.get_user_locale .. autoattribute:: RequestHandler.locale .. automethod:: RequestHandler.log_exception .. automethod:: RequestHandler.on_connection_close .. automethod:: RequestHandler.require_setting .. automethod:: RequestHandler.reverse_url .. automethod:: RequestHandler.set_etag_header .. autoattribute:: RequestHandler.settings .. automethod:: RequestHandler.static_url .. automethod:: RequestHandler.xsrf_form_html .. autoattribute:: RequestHandler.xsrf_token Application configuration ----------------------------- .. autoclass:: Application :members: .. attribute:: settings Additional keyword arguments passed to the constructor are saved in the `settings` dictionary, and are often referred to in documentation as "application settings". Settings are used to customize various aspects of Tornado (although in some cases richer customization is possible by overriding methods in a subclass of `RequestHandler`). Some applications also like to use the `settings` dictionary as a way to make application-specific settings available to handlers without using global variables. Settings used in Tornado are described below. General settings: * ``autoreload``: If ``True``, the server process will restart when any source files change, as described in :ref:`debug-mode`. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. * ``debug``: Shorthand for several debug mode settings, described in :ref:`debug-mode`. Setting ``debug=True`` is equivalent to ``autoreload=True``, ``compiled_template_cache=False``, ``static_hash_cache=False``, ``serve_traceback=True``. * ``default_handler_class`` and ``default_handler_args``: This handler will be used if no other match is found; use this to implement custom 404 pages (new in Tornado 3.2). * ``compress_response``: If ``True``, responses in textual formats will be compressed automatically. New in Tornado 4.0. * ``gzip``: Deprecated alias for ``compress_response`` since Tornado 4.0. * ``log_function``: This function will be called at the end of every request to log the result (with one argument, the `RequestHandler` object). The default implementation writes to the `logging` module's root logger. May also be customized by overriding `Application.log_request`. * ``serve_traceback``: If true, the default error page will include the traceback of the error. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. 
* ``ui_modules`` and ``ui_methods``: May be set to a mapping of `UIModule` or UI methods to be made available to templates. May be set to a module, dictionary, or a list of modules and/or dicts. See :ref:`ui-modules` for more details. * ``websocket_ping_interval``: If set to a number, all websockets will be pinged every n seconds. This can help keep the connection alive through certain proxy servers which close idle connections, and it can detect if the websocket has failed without being properly closed. * ``websocket_ping_timeout``: If the ping interval is set, and the server doesn't receive a 'pong' in this many seconds, it will close the websocket. The default is three times the ping interval, with a minimum of 30 seconds. Ignored if the ping interval is not set. Authentication and security settings: * ``cookie_secret``: Used by `RequestHandler.get_secure_cookie` and `.set_secure_cookie` to sign cookies. * ``key_version``: Used by `.RequestHandler.set_secure_cookie` to sign cookies with a specific key when ``cookie_secret`` is a key dictionary. * ``login_url``: The `authenticated` decorator will redirect to this url if the user is not logged in. Can be further customized by overriding `RequestHandler.get_login_url` * ``xsrf_cookies``: If true, :ref:`xsrf` will be enabled. * ``xsrf_cookie_version``: Controls the version of new XSRF cookies produced by this server. Should generally be left at the default (which will always be the highest supported version), but may be set to a lower value temporarily during version transitions. New in Tornado 3.2.2, which introduced XSRF cookie version 2. * ``xsrf_cookie_kwargs``: May be set to a dictionary of additional arguments to be passed to `.RequestHandler.set_cookie` for the XSRF cookie. * ``twitter_consumer_key``, ``twitter_consumer_secret``, ``friendfeed_consumer_key``, ``friendfeed_consumer_secret``, ``google_consumer_key``, ``google_consumer_secret``, ``facebook_api_key``, ``facebook_secret``: Used in the `tornado.auth` module to authenticate to various APIs. Template settings: * ``autoescape``: Controls automatic escaping for templates. May be set to ``None`` to disable escaping, or to the *name* of a function that all output should be passed through. Defaults to ``"xhtml_escape"``. Can be changed on a per-template basis with the ``{% autoescape %}`` directive. * ``compiled_template_cache``: Default is ``True``; if ``False`` templates will be recompiled on every request. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. * ``template_path``: Directory containing template files. Can be further customized by overriding `RequestHandler.get_template_path` * ``template_loader``: Assign to an instance of `tornado.template.BaseLoader` to customize template loading. If this setting is used the ``template_path`` and ``autoescape`` settings are ignored. Can be further customized by overriding `RequestHandler.create_template_loader`. * ``template_whitespace``: Controls handling of whitespace in templates; see `tornado.template.filter_whitespace` for allowed values. New in Tornado 4.3. Static file settings: * ``static_hash_cache``: Default is ``True``; if ``False`` static urls will be recomputed on every request. This option is new in Tornado 3.2; previously this functionality was controlled by the ``debug`` setting. * ``static_path``: Directory from which static files will be served. * ``static_url_prefix``: Url prefix for static files, defaults to ``"/static/"``.
* ``static_handler_class``, ``static_handler_args``: May be set to use a different handler for static files instead of the default `tornado.web.StaticFileHandler`. ``static_handler_args``, if set, should be a dictionary of keyword arguments to be passed to the handler's ``initialize`` method. .. autoclass:: URLSpec The ``URLSpec`` class is also available under the name ``tornado.web.url``. Decorators ---------- .. autofunction:: asynchronous .. autofunction:: authenticated .. autofunction:: addslash .. autofunction:: removeslash .. autofunction:: stream_request_body Everything else --------------- .. autoexception:: HTTPError .. autoexception:: Finish .. autoexception:: MissingArgumentError .. autoclass:: UIModule :members: .. autoclass:: ErrorHandler .. autoclass:: FallbackHandler .. autoclass:: RedirectHandler .. autoclass:: StaticFileHandler :members: tornado-4.5.3/docs/webframework.rst000066400000000000000000000001521322420601000173320ustar00rootroot00000000000000Web framework ============= .. toctree:: web template routing escape locale websocket tornado-4.5.3/docs/websocket.rst000066400000000000000000000021501322420601000166250ustar00rootroot00000000000000``tornado.websocket`` --- Bidirectional communication to the browser ==================================================================== .. testsetup:: import tornado.websocket .. automodule:: tornado.websocket .. autoclass:: WebSocketHandler Event handlers -------------- .. automethod:: WebSocketHandler.open .. automethod:: WebSocketHandler.on_message .. automethod:: WebSocketHandler.on_close .. automethod:: WebSocketHandler.select_subprotocol .. automethod:: WebSocketHandler.on_ping Output ------ .. automethod:: WebSocketHandler.write_message .. automethod:: WebSocketHandler.close Configuration ------------- .. automethod:: WebSocketHandler.check_origin .. automethod:: WebSocketHandler.get_compression_options .. automethod:: WebSocketHandler.set_nodelay Other ----- .. automethod:: WebSocketHandler.ping .. automethod:: WebSocketHandler.on_pong .. autoexception:: WebSocketClosedError Client-side support ------------------- .. autofunction:: websocket_connect .. autoclass:: WebSocketClientConnection :members: tornado-4.5.3/docs/wsgi.rst000066400000000000000000000007631322420601000156200ustar00rootroot00000000000000``tornado.wsgi`` --- Interoperability with other Python frameworks and servers ============================================================================== .. automodule:: tornado.wsgi Running Tornado apps on WSGI servers ------------------------------------ .. autoclass:: WSGIAdapter :members: .. autoclass:: WSGIApplication :members: Running WSGI apps on Tornado servers ------------------------------------ .. autoclass:: WSGIContainer :members: tornado-4.5.3/maint/000077500000000000000000000000001322420601000142675ustar00rootroot00000000000000tornado-4.5.3/maint/README000066400000000000000000000002421322420601000151450ustar00rootroot00000000000000This directory contains tools and scripts that are used in the development and maintenance of Tornado itself, but are probably not of interest to Tornado users. tornado-4.5.3/maint/circlerefs/000077500000000000000000000000001322420601000164105ustar00rootroot00000000000000tornado-4.5.3/maint/circlerefs/circlerefs.py000066400000000000000000000054641322420601000211140ustar00rootroot00000000000000#!/usr/bin/env python
"""Test script to find circular references.

Circular references are not leaks per se, because they will eventually
be GC'd.
However, on CPython, they prevent the reference-counting fast path from being used and instead rely on the slower full GC. This increases memory footprint and CPU overhead, so we try to eliminate circular references created by normal operation. """ from __future__ import print_function import gc import traceback import types from tornado import web, ioloop, gen, httpclient def find_circular_references(garbage=None): def inner(level): for item in level: item_id = id(item) if item_id not in garbage_ids: continue if item_id in visited_ids: continue if item_id in stack_ids: candidate = stack[stack.index(item):] candidate.append(item) found.append(candidate) continue stack.append(item) stack_ids.add(item_id) inner(gc.get_referents(item)) stack.pop() stack_ids.remove(item_id) visited_ids.add(item_id) garbage = garbage or gc.garbage found = [] stack = [] stack_ids = set() garbage_ids = set(map(id, garbage)) visited_ids = set() inner(garbage) inner = None return found class CollectHandler(web.RequestHandler): @gen.coroutine def get(self): self.write("Collected: {}\n".format(gc.collect())) self.write("Garbage: {}\n".format(len(gc.garbage))) for circular in find_circular_references(): print('\n==========\n Circular \n==========') for item in circular: print(' ', repr(item)) for item in circular: if isinstance(item, types.FrameType): print('\nLocals:', item.f_locals) print('\nTraceback:', repr(item)) traceback.print_stack(item) class DummyHandler(web.RequestHandler): @gen.coroutine def get(self): self.write('ok\n') application = web.Application([ (r'/dummy/', DummyHandler), (r'/collect/', CollectHandler), ], debug=True) @gen.coroutine def main(): gc.disable() gc.collect() gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_SAVEALL) print('GC disabled') print("Start on 8888") application.listen(8888, '127.0.0.1') # Do a little work. Alternately, could leave this script running and # poke at it with a browser. client = httpclient.AsyncHTTPClient() yield client.fetch('http://127.0.0.1:8888/dummy/') # Now report on the results. gc.collect() resp = yield client.fetch('http://127.0.0.1:8888/collect/') print(resp.body) if __name__ == "__main__": ioloop.IOLoop.current().run_sync(main) tornado-4.5.3/maint/requirements.in000066400000000000000000000011061322420601000173400ustar00rootroot00000000000000# Requirements for tools used in the development of tornado. # This list is for python 3.5; for 2.7 add: # - backports.ssl-match-hostname # - futures # - mock # - certifi # # Use virtualenv instead of venv; tox seems to get confused otherwise. # # maint/requirements.txt contains the pinned versions of all direct and # indirect dependencies; this file only contains direct dependencies # and is useful for upgrading. 
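#
# One way to refresh the pinned list (a sketch, assuming the pip-tools
# package is installed; the actual workflow used here may differ):
#   pip-compile maint/requirements.in --output-file maint/requirements.txt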
# Tornado's optional dependencies Twisted pycares pycurl # Other useful tools Sphinx autopep8 coverage flake8 pep8 pyflakes sphinx-rtd-theme tox twine virtualenv tornado-4.5.3/maint/requirements.txt000066400000000000000000000011771322420601000175610ustar00rootroot00000000000000alabaster==0.7.10 appdirs==1.4.3 args==0.1.0 attrs==16.3.0 Automat==0.5.0 autopep8==1.3.1 Babel==2.4.0 clint==0.5.1 constantly==15.1.0 coverage==4.3.4 docutils==0.13.1 flake8==3.3.0 imagesize==0.7.1 incremental==16.10.1 Jinja2==2.9.6 MarkupSafe==1.0 mccabe==0.6.1 packaging==16.8 pep8==1.7.0 pkginfo==1.4.1 pluggy==0.4.0 py==1.4.33 pycares==2.1.1 pycodestyle==2.3.1 pycurl==7.43.0 pyflakes==1.5.0 Pygments==2.2.0 pyparsing==2.2.0 pytz==2017.2 requests==2.13.0 requests-toolbelt==0.7.1 six==1.10.0 snowballstemmer==1.2.1 Sphinx==1.5.5 sphinx-rtd-theme==0.2.4 tox==2.7.0 twine==1.8.1 Twisted==17.1.0 virtualenv==15.1.0 zope.interface==4.3.3 tornado-4.5.3/maint/scripts/000077500000000000000000000000001322420601000157565ustar00rootroot00000000000000tornado-4.5.3/maint/scripts/custom_fixers/000077500000000000000000000000001322420601000206505ustar00rootroot00000000000000tornado-4.5.3/maint/scripts/custom_fixers/__init__.py000066400000000000000000000000001322420601000227470ustar00rootroot00000000000000tornado-4.5.3/maint/scripts/custom_fixers/fix_future_imports.py000066400000000000000000000042101322420601000251540ustar00rootroot00000000000000"""Updates all source files to import the same set of __future__ directives. """ from lib2to3 import fixer_base from lib2to3 import pytree from lib2to3.pgen2 import token from lib2to3.fixer_util import FromImport, Name, Comma, Newline # copied from fix_tuple_params.py def is_docstring(stmt): return isinstance(stmt, pytree.Node) and \ stmt.children[0].type == token.STRING class FixFutureImports(fixer_base.BaseFix): BM_compatible = True PATTERN = """import_from< 'from' module_name="__future__" 'import' any >""" def start_tree(self, tree, filename): self.found_future_import = False def new_future_import(self, old): new = FromImport("__future__", [Name("absolute_import", prefix=" "), Comma(), Name("division", prefix=" "), Comma(), Name("print_function", prefix=" ")]) if old is not None: new.prefix = old.prefix return new def transform(self, node, results): self.found_future_import = True return self.new_future_import(node) def finish_tree(self, tree, filename): if self.found_future_import: return if not isinstance(tree, pytree.Node): # Empty files (usually __init__.py) show up as a single Leaf # instead of a Node, so leave them alone return first_stmt = tree.children[0] if is_docstring(first_stmt): # Skip a line and add the import after the docstring tree.insert_child(1, Newline()) pos = 2 elif first_stmt.prefix: # No docstring, but an initial comment (perhaps a #! line). # Transfer the initial comment to a new blank line. 
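            # (In lib2to3, a node's ``prefix`` holds the whitespace and
            # comments that precede it, so moving the prefix onto a fresh
            # Newline leaf preserves the #! line while letting the import
            # be inserted before the first real statement.)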
newline = Newline() newline.prefix = first_stmt.prefix first_stmt.prefix = "" tree.insert_child(0, newline) pos = 1 else: # No comments or docstring, just insert at the start pos = 0 tree.insert_child(pos, self.new_future_import(None)) tree.insert_child(pos+1, Newline()) # terminates the import stmt tornado-4.5.3/maint/scripts/custom_fixers/fix_unicode_literal.py000066400000000000000000000006251322420601000252350ustar00rootroot00000000000000from lib2to3 import fixer_base from lib2to3.fixer_util import String class FixUnicodeLiteral(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'u' trailer< '(' arg=any ')' > > """ def transform(self, node, results): arg = results["arg"] node.replace(String('u'+arg.value, prefix=node.prefix)) tornado-4.5.3/maint/scripts/run_autopep8.sh000077500000000000000000000010261322420601000207450ustar00rootroot00000000000000#!/bin/sh # Runs autopep8 in the configuration used for tornado. # # W602 is "deprecated form of raising exception", but the fix is incorrect # (and I'm not sure if the three-argument form of raise is really deprecated # in the first place) # E501 is "line longer than 80 chars" but the automated fix is ugly. # E301 adds a blank line between docstring and first method # E309 adds a blank line between class declaration and docstring (?) autopep8 --ignore=W602,E501,E301,E309 -i tornado/*.py tornado/platform/*.py tornado/test/*.py tornado-4.5.3/maint/scripts/run_fixers.py000077500000000000000000000002561322420601000205220ustar00rootroot00000000000000#!/usr/bin/env python # Usage is like 2to3: # $ maint/scripts/run_fixers.py -wn --no-diffs tornado import sys from lib2to3.main import main sys.exit(main("custom_fixers")) tornado-4.5.3/maint/scripts/test_resolvers.py000066400000000000000000000027261322420601000214220ustar00rootroot00000000000000#!/usr/bin/env python from __future__ import print_function import pprint import socket from tornado import gen from tornado.ioloop import IOLoop from tornado.netutil import Resolver, ThreadedResolver from tornado.options import parse_command_line, define, options try: import twisted except ImportError: twisted = None try: import pycares except ImportError: pycares = None define('family', default='unspec', help='Address family to query: unspec, inet, or inet6') @gen.coroutine def main(): args = parse_command_line() if not args: args = ['localhost', 'www.google.com', 'www.facebook.com', 'www.dropbox.com'] resolvers = [Resolver(), ThreadedResolver()] if twisted is not None: from tornado.platform.twisted import TwistedResolver resolvers.append(TwistedResolver()) if pycares is not None: from tornado.platform.caresresolver import CaresResolver resolvers.append(CaresResolver()) family = { 'unspec': socket.AF_UNSPEC, 'inet': socket.AF_INET, 'inet6': socket.AF_INET6, }[options.family] for host in args: print('Resolving %s' % host) for resolver in resolvers: addrinfo = yield resolver.resolve(host, 80, family) print('%s: %s' % (resolver.__class__.__name__, pprint.pformat(addrinfo))) print() if __name__ == '__main__': IOLoop.instance().run_sync(main) tornado-4.5.3/maint/test/000077500000000000000000000000001322420601000152465ustar00rootroot00000000000000tornado-4.5.3/maint/test/README000066400000000000000000000002711322420601000161260ustar00rootroot00000000000000This directory contains additional tests that are not included in the main suite (because e.g. 
they have extra dependencies, run slowly, or produce more output than a simple pass/fail) tornado-4.5.3/maint/test/appengine/000077500000000000000000000000001322420601000172145ustar00rootroot00000000000000tornado-4.5.3/maint/test/appengine/README000066400000000000000000000007021322420601000200730ustar00rootroot00000000000000Unit test support for app engine. Currently very limited as most of our tests depend on direct network access, but these tests ensure that the modules that are supposed to work on app engine don't depend on any forbidden modules. The code lives in maint/appengine/common, but should be run from the py25 or py27 subdirectories (which contain an app.yaml and a bunch of symlinks). runtests.py is the entry point; cgi_runtests.py is used internally. tornado-4.5.3/maint/test/appengine/common/000077500000000000000000000000001322420601000205045ustar00rootroot00000000000000tornado-4.5.3/maint/test/appengine/common/cgi_runtests.py000066400000000000000000000030351322420601000235700ustar00rootroot00000000000000#!/usr/bin/env python import sys import unittest # Most of our tests depend on IOLoop, which is not usable on app engine. # Run the tests that work, and check that everything else is at least # importable (via tornado.test.import_test) TEST_MODULES = [ 'tornado.httputil.doctests', 'tornado.iostream.doctests', 'tornado.util.doctests', #'tornado.test.auth_test', #'tornado.test.concurrent_test', #'tornado.test.curl_httpclient_test', 'tornado.test.escape_test', #'tornado.test.gen_test', #'tornado.test.httpclient_test', #'tornado.test.httpserver_test', 'tornado.test.httputil_test', 'tornado.test.import_test', #'tornado.test.ioloop_test', #'tornado.test.iostream_test', 'tornado.test.locale_test', #'tornado.test.netutil_test', #'tornado.test.log_test', 'tornado.test.options_test', #'tornado.test.process_test', #'tornado.test.simple_httpclient_test', #'tornado.test.stack_context_test', 'tornado.test.template_test', #'tornado.test.testing_test', #'tornado.test.twisted_test', 'tornado.test.util_test', #'tornado.test.web_test', #'tornado.test.websocket_test', #'tornado.test.wsgi_test', ] def all(): return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES) def main(): print "Content-Type: text/plain\r\n\r\n", try: unittest.main(defaultTest='all', argv=sys.argv[:1]) except SystemExit, e: if e.code == 0: print "PASS" else: raise if __name__ == '__main__': main() tornado-4.5.3/maint/test/appengine/common/runtests.py000066400000000000000000000034671322420601000227570ustar00rootroot00000000000000#!/usr/bin/env python from __future__ import with_statement import contextlib import errno import os import random import signal import socket import subprocess import sys import time import urllib2 if __name__ == "__main__": tornado_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')) # dev_appserver doesn't seem to set SO_REUSEADDR port = random.randrange(10000, 11000) # does dev_appserver.py ever live anywhere but /usr/local/bin? 
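    # (Assumption: the standalone App Engine SDK symlinks dev_appserver.py
    # into /usr/local/bin; adjust the hard-coded path below if your SDK
    # lives elsewhere.)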
proc = subprocess.Popen([sys.executable, "/usr/local/bin/dev_appserver.py", os.path.dirname(os.path.abspath(__file__)), "--port=%d" % port, "--skip_sdk_update_check", ], cwd=tornado_root) try: for i in xrange(50): with contextlib.closing(socket.socket()) as sock: err = sock.connect_ex(('localhost', port)) if err == 0: break elif err != errno.ECONNREFUSED: raise Exception("Got unexpected socket error %d" % err) time.sleep(0.1) else: raise Exception("Server didn't start listening") resp = urllib2.urlopen("http://localhost:%d/" % port) print resp.read() finally: # dev_appserver sometimes ignores SIGTERM (especially on 2.5), # so try a few times to kill it. for sig in [signal.SIGTERM, signal.SIGTERM, signal.SIGKILL]: os.kill(proc.pid, sig) res = os.waitpid(proc.pid, os.WNOHANG) if res != (0,0): break time.sleep(0.1) else: os.waitpid(proc.pid, 0) tornado-4.5.3/maint/test/appengine/py27/000077500000000000000000000000001322420601000200155ustar00rootroot00000000000000tornado-4.5.3/maint/test/appengine/py27/app.yaml000066400000000000000000000002221322420601000214550ustar00rootroot00000000000000application: tornado-tests-appengine27 version: 1 runtime: python27 threadsafe: false api_version: 1 handlers: - url: / script: cgi_runtests.pytornado-4.5.3/maint/test/appengine/py27/cgi_runtests.py000077700000000000000000000000001322420601000276612../common/cgi_runtests.pyustar00rootroot00000000000000tornado-4.5.3/maint/test/appengine/py27/runtests.py000077700000000000000000000000001322420601000262152../common/runtests.pyustar00rootroot00000000000000tornado-4.5.3/maint/test/appengine/py27/tornado000077700000000000000000000000001322420601000240462../../../../tornadoustar00rootroot00000000000000tornado-4.5.3/maint/test/appengine/setup.py000066400000000000000000000002261322420601000207260ustar00rootroot00000000000000# Dummy setup file to make tox happy. In the appengine world things aren't # installed through setup.py import distutils.core distutils.core.setup() tornado-4.5.3/maint/test/appengine/tox.ini000066400000000000000000000007101322420601000205250ustar00rootroot00000000000000# App Engine tests require the SDK to be installed separately. # Version 1.6.1 or newer is required (older versions don't work when # python is run from a virtualenv) # # These are currently excluded from the main tox.ini because their # logs are spammy and they're a little flaky. 
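#
# Usage (from this directory, with the App Engine SDK installed
# separately as described above):
#   tox -e py27-appengine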
[tox] envlist = py27-appengine [testenv] changedir = {toxworkdir} [testenv:py27-appengine] basepython = python2.7 commands = python {toxinidir}/py27/runtests.py {posargs:} tornado-4.5.3/maint/test/cython/000077500000000000000000000000001322420601000165525ustar00rootroot00000000000000tornado-4.5.3/maint/test/cython/.gitignore000066400000000000000000000000361322420601000205410ustar00rootroot00000000000000.eggs cythonapp.egg-info dist tornado-4.5.3/maint/test/cython/MANIFEST.in000066400000000000000000000000261322420601000203060ustar00rootroot00000000000000include cythonapp.pyx tornado-4.5.3/maint/test/cython/cythonapp.pyx000066400000000000000000000014141322420601000213210ustar00rootroot00000000000000import cython from tornado import gen import pythonmodule async def native_coroutine(): x = await pythonmodule.hello() if x != "hello": raise ValueError("expected hello, got %r" % x) return "goodbye" @gen.coroutine def decorated_coroutine(): x = yield pythonmodule.hello() if x != "hello": raise ValueError("expected hello, got %r" % x) return "goodbye" # The binding directive is necessary for compatibility with # ArgReplacer (and therefore return_future), but only because # this is a static function. @cython.binding(True) def function_with_args(one, two, three): return (one, two, three) class AClass: # methods don't need the binding directive. def method_with_args(one, two, three): return (one, two, three) tornado-4.5.3/maint/test/cython/cythonapp_test.py000066400000000000000000000022541322420601000221730ustar00rootroot00000000000000from tornado.testing import AsyncTestCase, gen_test from tornado.util import ArgReplacer import unittest import cythonapp class CythonCoroutineTest(AsyncTestCase): @gen_test def test_native_coroutine(self): x = yield cythonapp.native_coroutine() self.assertEqual(x, "goodbye") @gen_test def test_decorated_coroutine(self): x = yield cythonapp.decorated_coroutine() self.assertEqual(x, "goodbye") class CythonArgReplacerTest(unittest.TestCase): def test_arg_replacer_function(self): replacer = ArgReplacer(cythonapp.function_with_args, 'two') args = (1, 'old', 3) kwargs = {} self.assertEqual(replacer.get_old_value(args, kwargs), 'old') self.assertEqual(replacer.replace('new', args, kwargs), ('old', [1, 'new', 3], {})) def test_arg_replacer_method(self): replacer = ArgReplacer(cythonapp.AClass().method_with_args, 'two') args = (1, 'old', 3) kwargs = {} self.assertEqual(replacer.get_old_value(args, kwargs), 'old') self.assertEqual(replacer.replace('new', args, kwargs), ('old', [1, 'new', 3], {})) tornado-4.5.3/maint/test/cython/pythonmodule.py000066400000000000000000000001561322420601000216550ustar00rootroot00000000000000from tornado import gen @gen.coroutine def hello(): yield gen.sleep(0.001) raise gen.Return("hello") tornado-4.5.3/maint/test/cython/setup.py000066400000000000000000000005231322420601000202640ustar00rootroot00000000000000from setuptools import setup try: import Cython.Build except: Cython = None if Cython is None: ext_modules = None else: ext_modules=Cython.Build.cythonize('cythonapp.pyx') setup( name='cythonapp', py_modules=['cythonapp_test', 'pythonmodule'], ext_modules=ext_modules, setup_requires='Cython>=0.23.1', ) tornado-4.5.3/maint/test/cython/tox.ini000066400000000000000000000007221322420601000200660ustar00rootroot00000000000000[tox] # This currently segfaults on pypy. envlist = py27,py33,py34,py35,py36 [testenv] deps = ../../.. 
Cython>=0.23.3 backports_abc>=0.4 singledispatch commands = python -m unittest cythonapp_test # Most of these are defaults, but if you specify any you can't fall back # defaults for the others. basepython = py27: python2.7 py33: python3.3 py34: python3.4 py35: python3.5 py36: python3.6 tornado-4.5.3/maint/test/pyuv/000077500000000000000000000000001322420601000162515ustar00rootroot00000000000000tornado-4.5.3/maint/test/pyuv/tox.ini000066400000000000000000000003671322420601000175720ustar00rootroot00000000000000[tox] envlist = py27 setupdir = ../../.. [testenv] commands = python -m tornado.test.runtests --ioloop=tornaduv.UVLoop {posargs:} # twisted tests don't work on pyuv IOLoop currently. deps = pyuv tornaduv futures mock tornado-4.5.3/maint/test/redbot/000077500000000000000000000000001322420601000165255ustar00rootroot00000000000000tornado-4.5.3/maint/test/redbot/README000066400000000000000000000005111322420601000174020ustar00rootroot00000000000000Redbot is an HTTP validator that checks for common problems, especially related to cacheability. These tests ensure that Tornado's default behavior is correct (but note that this guarantee does not automatically extend to applications built on Tornado since application behavior can impact cacheability. http://redbot.org/abouttornado-4.5.3/maint/test/redbot/red_test.py000066400000000000000000000212531322420601000207130ustar00rootroot00000000000000#!/usr/bin/env python import logging from redbot.resource import HttpResource import redbot.speak as rs import thor import threading from tornado import gen from tornado.options import parse_command_line from tornado.testing import AsyncHTTPTestCase, LogTrapTestCase from tornado.web import RequestHandler, Application, asynchronous import unittest class HelloHandler(RequestHandler): def get(self): self.write("Hello world") class RedirectHandler(RequestHandler): def get(self, path): self.redirect(path, status=int(self.get_argument('status', '302'))) class PostHandler(RequestHandler): def post(self): assert self.get_argument('foo') == 'bar' self.redirect('/hello', status=303) class ChunkedHandler(RequestHandler): @asynchronous @gen.engine def get(self): self.write('hello ') yield gen.Task(self.flush) self.write('world') yield gen.Task(self.flush) self.finish() class CacheHandler(RequestHandler): def get(self, computed_etag): self.write(computed_etag) def compute_etag(self): return self._write_buffer[0] class TestMixin(object): def get_handlers(self): return [ ('/hello', HelloHandler), ('/redirect(/.*)', RedirectHandler), ('/post', PostHandler), ('/chunked', ChunkedHandler), ('/cache/(.*)', CacheHandler), ] def get_app_kwargs(self): return dict(static_path='.') def get_allowed_warnings(self): return [ # We can't set a non-heuristic freshness at the framework level, # so just ignore this warning rs.FRESHNESS_HEURISTIC, # For our small test responses the Content-Encoding header # wipes out any gains from compression rs.CONNEG_GZIP_BAD, ] def get_allowed_errors(self): return [] def check_url(self, path, method='GET', body=None, headers=None, expected_status=200, allowed_warnings=None, allowed_errors=None): url = self.get_url(path) red = self.run_redbot(url, method, body, headers) if not red.response.complete: if isinstance(red.response.http_error, Exception): logging.warning((red.response.http_error.desc, vars(red.response.http_error), url)) raise red.response.http_error.res_error else: raise Exception("unknown error; incomplete response") self.assertEqual(int(red.response.status_code), expected_status) 
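        # redbot attaches a list of "notes" to each response; classify
        # them by severity and fail the test on any warning or error
        # that is not in the allowed lists.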
allowed_warnings = (allowed_warnings or []) + self.get_allowed_warnings() allowed_errors = (allowed_errors or []) + self.get_allowed_errors() errors = [] warnings = [] for msg in red.response.notes: if msg.level == 'bad': logger = logging.error if not isinstance(msg, tuple(allowed_errors)): errors.append(msg) elif msg.level == 'warning': logger = logging.warning if not isinstance(msg, tuple(allowed_warnings)): warnings.append(msg) elif msg.level in ('good', 'info', 'uri'): logger = logging.info else: raise Exception('unknown level' + msg.level) logger('%s: %s (%s)', msg.category, msg.show_summary('en'), msg.__class__.__name__) logger(msg.show_text('en')) self.assertEqual(len(warnings) + len(errors), 0, 'Had %d unexpected warnings and %d errors' % (len(warnings), len(errors))) def run_redbot(self, url, method, body, headers): red = HttpResource(url, method=method, req_body=body, req_hdrs=headers) def work(): red.run(thor.stop) thor.run() self.io_loop.add_callback(self.stop) thread = threading.Thread(target=work) thread.start() self.wait() thread.join() return red def test_hello(self): self.check_url('/hello') def test_static(self): # TODO: 304 responses SHOULD return the same etag that a full # response would. We currently do for If-None-Match, but not # for If-Modified-Since (because IMS does not otherwise # require us to read the file from disk) self.check_url('/static/red_test.py', allowed_warnings=[rs.MISSING_HDRS_304]) def test_static_versioned_url(self): self.check_url('/static/red_test.py?v=1234', allowed_warnings=[rs.MISSING_HDRS_304]) def test_redirect(self): self.check_url('/redirect/hello', expected_status=302) def test_permanent_redirect(self): self.check_url('/redirect/hello?status=301', expected_status=301) def test_404(self): self.check_url('/404', expected_status=404) def test_post(self): body = 'foo=bar' # Without an explicit Content-Length redbot will try to send the # request chunked. 
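        # (Pinning the length keeps this test focused on the POST/303
        # redirect behavior rather than on chunked request handling.)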
self.check_url( '/post', method='POST', body=body, headers=[('Content-Length', str(len(body))), ('Content-Type', 'application/x-www-form-urlencoded')], expected_status=303) def test_chunked(self): self.check_url('/chunked') def test_strong_etag_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_multiple_strong_etag_match(self): computed_etag = '"xyzzy1"' etags = '"xyzzy1", "xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_strong_etag_not_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy1"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) def test_multiple_strong_etag_not_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy1", "xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) def test_wildcard_etag(self): computed_etag = '"xyzzy"' etags = '*' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304, allowed_warnings=[rs.MISSING_HDRS_304]) def test_weak_etag_match(self): computed_etag = '"xyzzy1"' etags = 'W/"xyzzy1"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_multiple_weak_etag_match(self): computed_etag = '"xyzzy2"' etags = 'W/"xyzzy1", W/"xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=304) def test_weak_etag_not_match(self): computed_etag = '"xyzzy2"' etags = 'W/"xyzzy1"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) def test_multiple_weak_etag_not_match(self): computed_etag = '"xyzzy3"' etags = 'W/"xyzzy1", W/"xyzzy2"' self.check_url( '/cache/' + computed_etag, method='GET', headers=[('If-None-Match', etags)], expected_status=200) class DefaultHTTPTest(AsyncHTTPTestCase, LogTrapTestCase, TestMixin): def get_app(self): return Application(self.get_handlers(), **self.get_app_kwargs()) class GzipHTTPTest(AsyncHTTPTestCase, LogTrapTestCase, TestMixin): def get_app(self): return Application(self.get_handlers(), gzip=True, **self.get_app_kwargs()) def get_allowed_errors(self): return super(GzipHTTPTest, self).get_allowed_errors() + [ # TODO: The Etag is supposed to change when Content-Encoding is # used. This should be fixed, but it's difficult to do with the # way GZipContentEncoding fits into the pipeline, and in practice # it doesn't seem likely to cause any problems as long as we're # using the correct Vary header. rs.VARY_ETAG_DOESNT_CHANGE, ] if __name__ == '__main__': parse_command_line() unittest.main() tornado-4.5.3/maint/test/redbot/tox.ini000066400000000000000000000003231322420601000200360ustar00rootroot00000000000000[tox] envlist = py27 setupdir=../../.. 
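# Run with `tox` from this directory; red_test.py starts a local
# Tornado app and points redbot's cacheability checks at it.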
[testenv] commands = python red_test.py deps = # Newer versions of thor have a bug with redbot (5/18/13) thor==0.2.0 git+https://github.com/mnot/redbot.git tornado-4.5.3/maint/test/websocket/000077500000000000000000000000001322420601000172345ustar00rootroot00000000000000tornado-4.5.3/maint/test/websocket/.gitignore000066400000000000000000000000111322420601000212140ustar00rootroot00000000000000reports/ tornado-4.5.3/maint/test/websocket/client.py000066400000000000000000000025531322420601000210710ustar00rootroot00000000000000#!/usr/bin/env python import logging from tornado import gen from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.websocket import websocket_connect define('url', default='ws://localhost:9001') define('name', default='Tornado') @gen.engine def run_tests(): url = options.url + '/getCaseCount' control_ws = yield websocket_connect(url, None) num_tests = int((yield control_ws.read_message())) logging.info('running %d cases', num_tests) msg = yield control_ws.read_message() assert msg is None for i in range(1, num_tests + 1): logging.info('running test case %d', i) url = options.url + '/runCase?case=%d&agent=%s' % (i, options.name) test_ws = yield websocket_connect(url, None, compression_options={}) while True: message = yield test_ws.read_message() if message is None: break test_ws.write_message(message, binary=isinstance(message, bytes)) url = options.url + '/updateReports?agent=%s' % options.name update_ws = yield websocket_connect(url, None) msg = yield update_ws.read_message() assert msg is None IOLoop.instance().stop() def main(): parse_command_line() IOLoop.instance().add_callback(run_tests) IOLoop.instance().start() if __name__ == '__main__': main() tornado-4.5.3/maint/test/websocket/fuzzingclient.json000066400000000000000000000010321322420601000230160ustar00rootroot00000000000000{ "options": {"failByDrop": false}, "outdir": "./reports/servers", "servers": [ {"agent": "Tornado/py27", "url": "ws://localhost:9001", "options": {"version": 18}}, {"agent": "Tornado/py35", "url": "ws://localhost:9002", "options": {"version": 18}}, {"agent": "Tornado/pypy", "url": "ws://localhost:9003", "options": {"version": 18}} ], "cases": ["*"], "exclude-cases": ["9.*", "12.*.1","12.2.*", "12.3.*", "12.4.*", "12.5.*", "13.*.1"], "exclude-agent-cases": {} } tornado-4.5.3/maint/test/websocket/fuzzingserver.json000066400000000000000000000004131322420601000230500ustar00rootroot00000000000000 { "url": "ws://localhost:9001", "options": {"failByDrop": false}, "outdir": "./reports/clients", "webport": 8080, "cases": ["*"], "exclude-cases": ["9.*", "12.*.1","12.2.*", "12.3.*", "12.4.*", "12.5.*", "13.*.1"], "exclude-agent-cases": {} } tornado-4.5.3/maint/test/websocket/run-client.sh000077500000000000000000000005411322420601000216530ustar00rootroot00000000000000#!/bin/sh set -e tox .tox/py27/bin/wstest -m fuzzingserver & FUZZING_SERVER_PID=$! sleep 1 .tox/py27/bin/python client.py --name='Tornado/py27' .tox/py35/bin/python client.py --name='Tornado/py35' .tox/pypy/bin/python client.py --name='Tornado/pypy' kill $FUZZING_SERVER_PID wait echo "Tests complete. Output is in ./reports/clients/index.html" tornado-4.5.3/maint/test/websocket/run-server.sh000077500000000000000000000014271322420601000217070ustar00rootroot00000000000000#!/bin/sh # # Runs the autobahn websocket conformance test against tornado in both # python2 and python3. Output goes in ./reports/servers/index.html. 
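# (Each server process below is an instance of server.py, the echo
# WebSocketHandler in this directory; wstest then drives the cases
# listed in fuzzingclient.json against all three ports.)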
# # The --cases and --exclude arguments can be used to run only part of # the suite. The default is --exclude="9.*" to skip the relatively slow # performance tests; pass --exclude="" to override and include them. set -e # build/update the virtualenvs tox .tox/py27/bin/python server.py --port=9001 & PY27_SERVER_PID=$! .tox/py35/bin/python server.py --port=9002 & PY35_SERVER_PID=$! .tox/pypy/bin/python server.py --port=9003 & PYPY_SERVER_PID=$! sleep 1 .tox/py27/bin/wstest -m fuzzingclient kill $PY27_SERVER_PID kill $PY35_SERVER_PID kill $PYPY_SERVER_PID wait echo "Tests complete. Output is in ./reports/servers/index.html" tornado-4.5.3/maint/test/websocket/server.py000066400000000000000000000012001322420601000211050ustar00rootroot00000000000000#!/usr/bin/env python from tornado.ioloop import IOLoop from tornado.options import define, options, parse_command_line from tornado.websocket import WebSocketHandler from tornado.web import Application define('port', default=9000) class EchoHandler(WebSocketHandler): def on_message(self, message): self.write_message(message, binary=isinstance(message, bytes)) def get_compression_options(self): return {} if __name__ == '__main__': parse_command_line() app = Application([ ('/', EchoHandler), ]) app.listen(options.port, address='127.0.0.1') IOLoop.instance().start() tornado-4.5.3/maint/test/websocket/tox.ini000066400000000000000000000004441322420601000205510ustar00rootroot00000000000000# We don't actually use tox to run this test, but it's the easiest way # to install autobahn and build the speedups module. # See run.sh for the real test runner. [tox] envlist = py27, py35, pypy setupdir=../../.. [testenv] commands = python -c pass [testenv:py27] deps = autobahntestsuite tornado-4.5.3/maint/vm/000077500000000000000000000000001322420601000147115ustar00rootroot00000000000000tornado-4.5.3/maint/vm/README000066400000000000000000000016431322420601000155750ustar00rootroot00000000000000This directory contains virtual machine setup scripts for testing Tornado. Requirements: Vagrant (http://vagrantup.com) and VirtualBox (http://virtualbox.org). Vagrant provides an easy download for Ubuntu images, base images for other platforms are harder to find and can be built with VeeWee (https://github.com/jedi4ever/veewee). Usage: cd to the appropriate directory and run `vagrant up`, then `vagrant ssh`. From there, simply run `tox` to run the full test suite, or cd to /tornado and test manually. Afterwards, use `vagrant suspend` or `vagrant destroy` to clean up. Notes: Python distutils (and therefore tox) assume that if the platform supports hard links, they can be used in the Tornado source directory. VirtualBox's shared folder filesystem does not support hard links (or symlinks), so we have to use NFS shared folders instead. (which has the unfortunate side effect of requiring sudo on the host machine) tornado-4.5.3/maint/vm/freebsd/000077500000000000000000000000001322420601000163235ustar00rootroot00000000000000tornado-4.5.3/maint/vm/freebsd/Vagrantfile000066400000000000000000000015471322420601000205170ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! VAGRANTFILE_API_VERSION = "2" Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.box = "chef/freebsd-10.0" config.vm.network "private_network", type: "dhcp" # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. 
The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. config.vm.synced_folder "../../..", "/tornado", type: "nfs" # Override the default /vagrant mapping to use nfs, since freebsd doesn't # support other folder types. config.vm.synced_folder ".", "/vagrant", type: "nfs" config.ssh.shell = "/bin/sh" config.vm.provision :shell, :path => "setup.sh" end tornado-4.5.3/maint/vm/freebsd/setup.sh000066400000000000000000000003551322420601000200220ustar00rootroot00000000000000#!/bin/sh chsh -s bash vagrant PACKAGES=" curl python python34 py27-pip py27-virtualenv " PIP_PACKAGES=" futures pycurl tox " ASSUME_ALWAYS_YES=true pkg install $PACKAGES pip install $PIP_PACKAGES /tornado/maint/vm/shared-setup.sh tornado-4.5.3/maint/vm/freebsd/tox.ini000066400000000000000000000004751322420601000176440ustar00rootroot00000000000000[tox] envlist=py27-full, py27, py34 setupdir=/tornado # /home is a symlink to /usr/home, but tox doesn't like symlinks here toxworkdir=/usr/home/vagrant/tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] # twisted's tests fail on freebsd deps = futures pycurl tornado-4.5.3/maint/vm/shared-setup.sh000077500000000000000000000004251322420601000176550ustar00rootroot00000000000000#!/bin/sh # Run at the end of each vm's provisioning script set -e # Link tox.ini into the home directory so you can run tox immediately # after ssh'ing in without cd'ing to /vagrant (since cd'ing to /tornado # gets the wrong config) ln -sf /vagrant/tox.ini ~vagrant/tox.ini tornado-4.5.3/maint/vm/ubuntu12.04/000077500000000000000000000000001322420601000166205ustar00rootroot00000000000000tornado-4.5.3/maint/vm/ubuntu12.04/Vagrantfile000066400000000000000000000004651322420601000210120ustar00rootroot00000000000000Vagrant::Config.run do |config| config.vm.box = "precise64" config.vm.box_url = "http://files.vagrantup.com/precise64.box" config.vm.network :hostonly, "172.19.1.5" config.vm.share_folder("tornado", "/tornado", "../../..", :nfs=> true) config.vm.provision :shell, :path => "setup.sh" endtornado-4.5.3/maint/vm/ubuntu12.04/setup.sh000066400000000000000000000014531322420601000203170ustar00rootroot00000000000000#!/bin/sh set -e apt-get update # libcurl4-gnutls-dev is the default if you ask for libcurl4-dev, but it # has bugs that make our tests deadlock (the relevant tests detect this and # disable themselves, but it means that to get full coverage we have to use # the openssl version). # The oddly-named python-software-properties includes add-apt-repository. APT_PACKAGES=" python-pip python-dev libcurl4-openssl-dev python-software-properties " apt-get -y install $APT_PACKAGES # Ubuntu 12.04 has python 2.7 as default; install more from here. 
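# (The "deadsnakes" PPA provides builds of CPython versions that the
# distribution itself doesn't ship; here it supplies python3.5, which
# Ubuntu 12.04 lacks.)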
add-apt-repository ppa:fkrull/deadsnakes apt-get update DEADSNAKES_PACKAGES=" python3.5 python3.5-dev " apt-get -y install $DEADSNAKES_PACKAGES PIP_PACKAGES=" futures pycurl tox twisted virtualenv " pip install $PIP_PACKAGES /tornado/maint/vm/shared-setup.sh tornado-4.5.3/maint/vm/ubuntu12.04/tox.ini000066400000000000000000000012451322420601000201350ustar00rootroot00000000000000[tox] envlist = py27-full, py27, py27-select, py27-twisted setupdir=/tornado toxworkdir=/home/vagrant/tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] basepython = python2.7 deps = futures pycurl twisted==12.2.0 [testenv:py27-select] basepython = python2.7 deps = futures pycurl twisted==12.2.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.select.SelectIOLoop {posargs:} [testenv:py27-twisted] basepython = python2.7 deps = futures pycurl twisted==12.2.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.twisted.TwistedIOLoop {posargs:} tornado-4.5.3/maint/vm/ubuntu14.04/000077500000000000000000000000001322420601000166225ustar00rootroot00000000000000tornado-4.5.3/maint/vm/ubuntu14.04/Vagrantfile000066400000000000000000000003701322420601000210070ustar00rootroot00000000000000Vagrant::Config.run do |config| config.vm.box = "ubuntu/trusty64" config.vm.network :hostonly, "172.19.1.8" config.vm.share_folder("tornado", "/tornado", "../../..", :nfs=> true) config.vm.provision :shell, :path => "setup.sh" endtornado-4.5.3/maint/vm/ubuntu14.04/setup.sh000066400000000000000000000010541322420601000203160ustar00rootroot00000000000000#!/bin/sh set -e apt-get update # libcurl4-gnutls-dev is the default if you ask for libcurl4-dev, but it # has bugs that make our tests deadlock (the relevant tests detect this and # disable themselves, but it means that to get full coverage we have to use # the openssl version). APT_PACKAGES=" python-pip python-dev python3-pycurl libcurl4-openssl-dev " apt-get -y install $APT_PACKAGES # Ubuntu 14.04 includes python 2.7 and 3.4. PIP_PACKAGES=" futures pycurl tox twisted virtualenv " pip install $PIP_PACKAGES /tornado/maint/vm/shared-setup.sh tornado-4.5.3/maint/vm/ubuntu14.04/tox.ini000066400000000000000000000013111322420601000201310ustar00rootroot00000000000000[tox] envlist = py27-full, py34, py27, py27-select, py27-twisted setupdir=/tornado toxworkdir=/home/vagrant/tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] basepython = python2.7 deps = futures mock pycurl twisted==14.0.0 [testenv:py27-select] basepython = python2.7 deps = futures mock pycurl twisted==14.0.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.select.SelectIOLoop {posargs:} [testenv:py27-twisted] basepython = python2.7 deps = futures mock pycurl twisted==14.0.0 commands = python -m tornado.test.runtests --ioloop=tornado.platform.twisted.TwistedIOLoop {posargs:} tornado-4.5.3/maint/vm/windows/000077500000000000000000000000001322420601000164035ustar00rootroot00000000000000tornado-4.5.3/maint/vm/windows/bootstrap.py000066400000000000000000000062471322420601000210030ustar00rootroot00000000000000r"""Installs files needed for tornado testing on windows. These instructions are compatible with the VMs provided by http://modern.ie. The bootstrapping script works on the WinXP/IE6 and Win8/IE10 configurations, although tornado's tests do not pass on XP. 1) Install virtualbox guest additions (from the device menu in virtualbox) 2) Set up a shared folder to the root of your tornado repo. 
It must be a read-write mount to use tox, although the tests can be run directly in a read-only mount. This will probably assign drive letter E:. 3) Install Python 2.7 from python.org. 4) Run this script by double-clicking it, or running "c:\python27\python.exe bootstrap.py" in a shell. To run the tests by hand, cd to e:\ and run c:\python27\python.exe -m tornado.test.runtests To run the tests with tox, cd to e:\maint\vm\windows and run c:\python27\scripts\tox To run under cygwin (which must be installed separately), run cd /cygdrive/e; python -m tornado.test.runtests """ import os import subprocess import sys import urllib TMPDIR = r'c:\tornado_bootstrap' PYTHON_VERSIONS = [ (r'c:\python27\python.exe', 'http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi'), (r'c:\python33\python.exe', 'http://www.python.org/ftp/python/3.3.0/python-3.3.0.msi'), ] SCRIPTS_DIR = r'c:\python27\scripts' EASY_INSTALL = os.path.join(SCRIPTS_DIR, 'easy_install.exe') PY_PACKAGES = ['tox', 'virtualenv', 'pip'] def download_to_cache(url, local_name=None): if local_name is None: local_name = url.split('/')[-1] filename = os.path.join(TMPDIR, local_name) if not os.path.exists(filename): data = urllib.urlopen(url).read() with open(filename, 'wb') as f: f.write(data) return filename def main(): if not os.path.exists(TMPDIR): os.mkdir(TMPDIR) os.chdir(TMPDIR) for exe, url in PYTHON_VERSIONS: if os.path.exists(exe): print "%s already exists, skipping" % exe continue print "Installing %s" % url filename = download_to_cache(url) # http://blog.jaraco.com/2012/01/how-i-install-python-on-windows.html subprocess.check_call(['msiexec', '/i', filename, 'ALLUSERS=1', '/passive']) if not os.path.exists(EASY_INSTALL): filename = download_to_cache('http://python-distribute.org/distribute_setup.py') subprocess.check_call([sys.executable, filename]) subprocess.check_call([EASY_INSTALL] + PY_PACKAGES) # cygwin's setup.exe doesn't like being run from a script (looks # UAC-related). If it did, something like this might install it. # (install python, python-setuptools, python3, and easy_install # unittest2 (cygwin's python 2 is 2.6)) #filename = download_to_cache('http://cygwin.com/setup.exe') #CYGTMPDIR = os.path.join(TMPDIR, 'cygwin') #if not os.path.exists(CYGTMPDIR): # os.mkdir(CYGTMPDIR) ## http://www.jbmurphy.com/2011/06/16/powershell-script-to-install-cygwin/ #CYGWIN_ARGS = [filename, '-q', '-l', CYGTMPDIR, # '-s', 'http://mirror.nyi.net/cygwin/', '-R', r'c:\cygwin'] #subprocess.check_call(CYGWIN_ARGS) if __name__ == '__main__': main() tornado-4.5.3/maint/vm/windows/tox.ini000066400000000000000000000011611322420601000177150ustar00rootroot00000000000000[tox] envlist = py27-full, py27, py33, py27-opt, py33-monotonic setupdir = e:\ toxworkdir = c:\tox-tornado [testenv] commands = python -m tornado.test.runtests {posargs:} [testenv:py27-full] basepython = python2.7 deps = futures mock [testenv:py33] # tox's path mappings haven't been updated for py33 yet. basepython = c:\python33\python.exe [testenv:py33-monotonic] basepython = c:\python33\python.exe commands = python -m tornado.test.runtests --ioloop_time_monotonic {posargs:} [testenv:py27-opt] basepython = python2.7 deps = futures mock commands = python -O -m tornado.test.runtests {posargs:} tornado-4.5.3/runtests.sh000077500000000000000000000007771322420601000154200ustar00rootroot00000000000000#!/bin/sh # Run the Tornado test suite. # # Also consider using tox, which uses virtualenv to run the test suite # under multiple versions of python. 
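#
# Usage: ./runtests.sh [args...]
# Extra arguments are passed through to tornado.test.runtests
# (for example, the name of a single test module to run).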
cd $(dirname $0) # "python -m" differs from "python tornado/test/runtests.py" in how it sets # up the default python path. "python -m" uses the current directory, # while "python file.py" uses the directory containing "file.py" (which is # not what you want if file.py appears within a package you want to import # from) python -m tornado.test.runtests "$@" tornado-4.5.3/setup.py000066400000000000000000000150741322420601000147000ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import platform import sys import warnings try: # Use setuptools if available, for install_requires (among other things). import setuptools from setuptools import setup except ImportError: setuptools = None from distutils.core import setup from distutils.core import Extension # The following code is copied from # https://github.com/mongodb/mongo-python-driver/blob/master/setup.py # to support installing without the extension on platforms where # no compiler is available. from distutils.command.build_ext import build_ext class custom_build_ext(build_ext): """Allow C extension building to fail. The C extension speeds up websocket masking, but is not essential. """ warning_message = """ ******************************************************************** WARNING: %s could not be compiled. No C extensions are essential for Tornado to run, although they do result in significant speed improvements for websockets. %s Here are some hints for popular operating systems: If you are seeing this message on Linux you probably need to install GCC and/or the Python development package for your version of Python. Debian and Ubuntu users should issue the following command: $ sudo apt-get install build-essential python-dev RedHat and CentOS users should issue the following command: $ sudo yum install gcc python-devel Fedora users should issue the following command: $ sudo dnf install gcc python-devel If you are seeing this message on OSX please read the documentation here: http://api.mongodb.org/python/current/installation.html#osx ******************************************************************** """ def run(self): try: build_ext.run(self) except Exception: e = sys.exc_info()[1] sys.stdout.write('%s\n' % str(e)) warnings.warn(self.warning_message % ("Extension modules", "There was an issue with " "your platform configuration" " - see above.")) def build_extension(self, ext): name = ext.name try: build_ext.build_extension(self, ext) except Exception: e = sys.exc_info()[1] sys.stdout.write('%s\n' % str(e)) warnings.warn(self.warning_message % ("The %s extension " "module" % (name,), "The output above " "this warning shows how " "the compilation " "failed.")) kwargs = {} version = "4.5.3" with open('README.rst') as f: kwargs['long_description'] = f.read() if (platform.python_implementation() == 'CPython' and os.environ.get('TORNADO_EXTENSION') != '0'): # This extension builds and works on pypy as well, although pypy's jit # produces equivalent performance. 
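    # (TORNADO_EXTENSION=0 skips building the extension entirely, while
    # TORNADO_EXTENSION=1 makes a build failure fatal; when the variable
    # is unset, custom_build_ext below falls back to pure Python on
    # failure.)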
kwargs['ext_modules'] = [ Extension('tornado.speedups', sources=['tornado/speedups.c']), ] if os.environ.get('TORNADO_EXTENSION') != '1': # Unless the user has specified that the extension is mandatory, # fall back to the pure-python implementation on any build failure. kwargs['cmdclass'] = {'build_ext': custom_build_ext} if setuptools is not None: # If setuptools is not available, you're on your own for dependencies. install_requires = [] if sys.version_info < (2, 7): # Only needed indirectly, for singledispatch. install_requires.append('ordereddict') if sys.version_info < (2, 7, 9): install_requires.append('backports.ssl_match_hostname') if sys.version_info < (3, 4): install_requires.append('singledispatch') # Certifi is also optional on 2.7.9+, although making our dependencies # conditional on micro version numbers seems like a bad idea # until we have more declarative metadata. install_requires.append('certifi') if sys.version_info < (3, 5): install_requires.append('backports_abc>=0.4') kwargs['install_requires'] = install_requires setup( name="tornado", version=version, packages=["tornado", "tornado.test", "tornado.platform"], package_data={ # data files need to be listed both here (which determines what gets # installed) and in MANIFEST.in (which determines what gets included # in the sdist tarball) "tornado.test": [ "README", "csv_translations/fr_FR.csv", "gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo", "gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po", "options_test.cfg", "static/robots.txt", "static/sample.xml", "static/sample.xml.gz", "static/sample.xml.bz2", "static/dir/index.html", "static_foo.txt", "templates/utf8.html", "test.crt", "test.key", ], }, author="Facebook", author_email="python-tornado@googlegroups.com", url="http://www.tornadoweb.org/", license="http://www.apache.org/licenses/LICENSE-2.0", description="Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed.", classifiers=[ 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', ], **kwargs ) tornado-4.5.3/tornado/000077500000000000000000000000001322420601000146255ustar00rootroot00000000000000tornado-4.5.3/tornado/__init__.py000066400000000000000000000021241322420601000167350ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Tornado web server and tools.""" from __future__ import absolute_import, division, print_function # version is a human-readable version number. # version_info is a four-tuple for programmatic comparison. 
The first
# three numbers are the components of the version number.  The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
version = "4.5.3"
version_info = (4, 5, 3, 0)
tornado-4.5.3/tornado/_locale_data.py000066400000000000000000000111751322420601000175730ustar00rootroot00000000000000#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Data used by the tornado.locale module."""

from __future__ import absolute_import, division, print_function

LOCALE_NAMES = {
    "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
    "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
    "ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
    "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
    "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
    "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
    "ca_ES": {"name_en": u"Catalan", "name": u"Català"},
    "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
    "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
    "da_DK": {"name_en": u"Danish", "name": u"Dansk"},
    "de_DE": {"name_en": u"German", "name": u"Deutsch"},
    "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
    "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
    "en_US": {"name_en": u"English (US)", "name": u"English (US)"},
    "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
    "es_LA": {"name_en": u"Spanish", "name": u"Español"},
    "et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
    "eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
    "fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
    "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
    "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
    "fr_FR": {"name_en": u"French", "name": u"Français"},
    "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
    "gl_ES": {"name_en": u"Galician", "name": u"Galego"},
    "he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
    "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
    "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
    "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
    "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
    "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
    "it_IT": {"name_en": u"Italian", "name": u"Italiano"},
    "ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
    "ko_KR": {"name_en": u"Korean", "name": u"한국어"},
    "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
    "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
    "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
    "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
    "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
    "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
    "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
    "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
    "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
    "pl_PL": {"name_en": u"Polish", "name": u"Polski"},
    "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
    "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
    "ro_RO": {"name_en": u"Romanian", "name": u"Română"},
    "ru_RU": {"name_en": u"Russian", "name": u"Русский"},
    "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
    "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
    "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
    "sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
    "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
    "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
    "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
    "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
    "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
    "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
    "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
    "uk_UA": {"name_en": u"Ukrainian", "name": u"Українська"},
    "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
    "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
    "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
}
tornado-4.5.3/tornado/auth.py000066400000000000000000001351261322420601000161470ustar00rootroot00000000000000#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""This module contains implementations of various third-party
authentication schemes.

All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class.  They are used in two ways:

* On a login handler, use methods such as ``authenticate_redirect()``,
  ``authorize_redirect()``, and ``get_authenticated_user()`` to
  establish the user's identity and store authentication tokens to your
  database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
  or ``twitter_request()`` to use the authentication tokens to make
  requests to the respective services.

They all take slightly different arguments due to the fact all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.

Example usage for Google OAuth:

.. testcode::

    class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
                                   tornado.auth.GoogleOAuth2Mixin):
        @tornado.gen.coroutine
        def get(self):
            if self.get_argument('code', False):
                user = yield self.get_authenticated_user(
                    redirect_uri='http://your.site.com/auth/google',
                    code=self.get_argument('code'))
                # Save the user with e.g. set_secure_cookie
            else:
                yield self.authorize_redirect(
                    redirect_uri='http://your.site.com/auth/google',
                    client_id=self.settings['google_oauth']['key'],
                    scope=['profile', 'email'],
                    response_type='code',
                    extra_params={'approval_prompt': 'auto'})

.. testoutput::
   :hide:

.. versionchanged:: 4.0
   All of the callback interfaces in this module are now guaranteed
   to run their callback with an argument of ``None`` on error.
Previously some functions would do this while others would simply terminate the request on their own. This change also ensures that errors are more consistently reported through the ``Future`` interfaces. """ from __future__ import absolute_import, division, print_function import base64 import binascii import functools import hashlib import hmac import time import uuid from tornado.concurrent import TracebackFuture, return_future, chain_future from tornado import gen from tornado import httpclient from tornado import escape from tornado.httputil import url_concat from tornado.log import gen_log from tornado.stack_context import ExceptionStackContext from tornado.util import unicode_type, ArgReplacer, PY3 if PY3: import urllib.parse as urlparse import urllib.parse as urllib_parse long = int else: import urlparse import urllib as urllib_parse class AuthError(Exception): pass def _auth_future_to_callback(callback, future): try: result = future.result() except AuthError as e: gen_log.warning(str(e)) result = None callback(result) def _auth_return_future(f): """Similar to tornado.concurrent.return_future, but uses the auth module's legacy callback interface. Note that when using this decorator the ``callback`` parameter inside the function will actually be a future. """ replacer = ArgReplacer(f, 'callback') @functools.wraps(f) def wrapper(*args, **kwargs): future = TracebackFuture() callback, args, kwargs = replacer.replace(future, args, kwargs) if callback is not None: future.add_done_callback( functools.partial(_auth_future_to_callback, callback)) def handle_exception(typ, value, tb): if future.done(): return False else: future.set_exc_info((typ, value, tb)) return True with ExceptionStackContext(handle_exception): f(*args, **kwargs) return future return wrapper class OpenIdMixin(object): """Abstract implementation of OpenID and Attribute Exchange. Class attributes: * ``_OPENID_ENDPOINT``: the identity provider's URI. """ @return_future def authenticate_redirect(self, callback_uri=None, ax_attrs=["name", "email", "language", "username"], callback=None): """Redirects to the authentication URL for this service. After authentication, the service will redirect back to the given callback URI with additional parameters including ``openid.mode``. We request the given attributes for the authenticated user by default (name, email, language, and username). If you don't need all those attributes for your app, you can request fewer with the ax_attrs keyword argument. .. versionchanged:: 3.1 Returns a `.Future` and takes an optional callback. These are not strictly necessary as this method is synchronous, but they are supplied for consistency with `OAuthMixin.authorize_redirect`. """ callback_uri = callback_uri or self.request.uri args = self._openid_args(callback_uri, ax_attrs=ax_attrs) self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args)) callback() @_auth_return_future def get_authenticated_user(self, callback, http_client=None): """Fetches the authenticated user data upon redirect. This method should be called by the handler that receives the redirect from the `authenticate_redirect()` method (which is often the same as the one that calls it; in that case you would call `get_authenticated_user` if the ``openid.mode`` parameter is present and `authenticate_redirect` if it is not). The result of this method will generally be used to set a cookie. 
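        As a purely illustrative sketch, a login handler combining the two
        methods might look like this (the handler name, the endpoint, and
        the cookie handling are assumptions made for the example):

        .. testcode::

            class ExampleOpenIdLoginHandler(tornado.web.RequestHandler,
                                            tornado.auth.OpenIdMixin):
                _OPENID_ENDPOINT = "https://example.com/openid"  # hypothetical provider

                @tornado.gen.coroutine
                def get(self):
                    if self.get_argument("openid.mode", None):
                        # Redirected back from the provider: finish the handshake.
                        user = yield self.get_authenticated_user()
                        self.set_secure_cookie("user",
                                               tornado.escape.json_encode(user))
                        self.redirect("/")
                    else:
                        # First visit: send the user to the provider.
                        yield self.authenticate_redirect()

        .. testoutput::
           :hide: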
""" # Verify the OpenID response via direct request to the OP args = dict((k, v[-1]) for k, v in self.request.arguments.items()) args["openid.mode"] = u"check_authentication" url = self._OPENID_ENDPOINT if http_client is None: http_client = self.get_auth_http_client() http_client.fetch(url, functools.partial( self._on_authentication_verified, callback), method="POST", body=urllib_parse.urlencode(args)) def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None): url = urlparse.urljoin(self.request.full_url(), callback_uri) args = { "openid.ns": "http://specs.openid.net/auth/2.0", "openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select", "openid.identity": "http://specs.openid.net/auth/2.0/identifier_select", "openid.return_to": url, "openid.realm": urlparse.urljoin(url, '/'), "openid.mode": "checkid_setup", } if ax_attrs: args.update({ "openid.ns.ax": "http://openid.net/srv/ax/1.0", "openid.ax.mode": "fetch_request", }) ax_attrs = set(ax_attrs) required = [] if "name" in ax_attrs: ax_attrs -= set(["name", "firstname", "fullname", "lastname"]) required += ["firstname", "fullname", "lastname"] args.update({ "openid.ax.type.firstname": "http://axschema.org/namePerson/first", "openid.ax.type.fullname": "http://axschema.org/namePerson", "openid.ax.type.lastname": "http://axschema.org/namePerson/last", }) known_attrs = { "email": "http://axschema.org/contact/email", "language": "http://axschema.org/pref/language", "username": "http://axschema.org/namePerson/friendly", } for name in ax_attrs: args["openid.ax.type." + name] = known_attrs[name] required.append(name) args["openid.ax.required"] = ",".join(required) if oauth_scope: args.update({ "openid.ns.oauth": "http://specs.openid.net/extensions/oauth/1.0", "openid.oauth.consumer": self.request.host.split(":")[0], "openid.oauth.scope": oauth_scope, }) return args def _on_authentication_verified(self, future, response): if response.error or b"is_valid:true" not in response.body: future.set_exception(AuthError( "Invalid OpenID response: %s" % (response.error or response.body))) return # Make sure we got back at least an email from attribute exchange ax_ns = None for name in self.request.arguments: if name.startswith("openid.ns.") and \ self.get_argument(name) == u"http://openid.net/srv/ax/1.0": ax_ns = name[10:] break def get_ax_arg(uri): if not ax_ns: return u"" prefix = "openid." + ax_ns + ".type." ax_name = None for name in self.request.arguments.keys(): if self.get_argument(name) == uri and name.startswith(prefix): part = name[len(prefix):] ax_name = "openid." + ax_ns + ".value." 
+ part break if not ax_name: return u"" return self.get_argument(ax_name, u"") email = get_ax_arg("http://axschema.org/contact/email") name = get_ax_arg("http://axschema.org/namePerson") first_name = get_ax_arg("http://axschema.org/namePerson/first") last_name = get_ax_arg("http://axschema.org/namePerson/last") username = get_ax_arg("http://axschema.org/namePerson/friendly") locale = get_ax_arg("http://axschema.org/pref/language").lower() user = dict() name_parts = [] if first_name: user["first_name"] = first_name name_parts.append(first_name) if last_name: user["last_name"] = last_name name_parts.append(last_name) if name: user["name"] = name elif name_parts: user["name"] = u" ".join(name_parts) elif email: user["name"] = email.split("@")[0] if email: user["email"] = email if locale: user["locale"] = locale if username: user["username"] = username claimed_id = self.get_argument("openid.claimed_id", None) if claimed_id: user["claimed_id"] = claimed_id future.set_result(user) def get_auth_http_client(self): """Returns the `.AsyncHTTPClient` instance to be used for auth requests. May be overridden by subclasses to use an HTTP client other than the default. """ return httpclient.AsyncHTTPClient() class OAuthMixin(object): """Abstract implementation of OAuth 1.0 and 1.0a. See `TwitterMixin` below for an example implementation. Class attributes: * ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url. * ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url. * ``_OAUTH_VERSION``: May be either "1.0" or "1.0a". * ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires advance registration of callbacks. Subclasses must also override the `_oauth_get_user_future` and `_oauth_consumer_token` methods. """ @return_future def authorize_redirect(self, callback_uri=None, extra_params=None, http_client=None, callback=None): """Redirects the user to obtain OAuth authorization for this service. The ``callback_uri`` may be omitted if you have previously registered a callback URI with the third-party service. For some services (including Friendfeed), you must use a previously-registered callback URI and cannot specify a callback via this method. This method sets a cookie called ``_oauth_request_token`` which is subsequently used (and cleared) in `get_authenticated_user` for security purposes. Note that this method is asynchronous, although it calls `.RequestHandler.finish` for you so it may not be necessary to pass a callback or use the `.Future` it returns. However, if this method is called from a function decorated with `.gen.coroutine`, you must call it with ``yield`` to keep the response from being closed prematurely. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. """ if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): raise Exception("This service does not support oauth_callback") if http_client is None: http_client = self.get_auth_http_client() if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": http_client.fetch( self._oauth_request_token_url(callback_uri=callback_uri, extra_params=extra_params), functools.partial( self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri, callback)) else: http_client.fetch( self._oauth_request_token_url(), functools.partial( self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri, callback)) @_auth_return_future def get_authenticated_user(self, callback, http_client=None): """Gets the OAuth authorized user and access token. 
This method should be called from the handler for your OAuth callback URL to complete the registration process. We run the callback with the authenticated user dictionary. This dictionary will contain an ``access_key`` which can be used to make authorized requests to this service on behalf of the user. The dictionary will also contain other fields such as ``name``, depending on the service used. """ future = callback request_key = escape.utf8(self.get_argument("oauth_token")) oauth_verifier = self.get_argument("oauth_verifier", None) request_cookie = self.get_cookie("_oauth_request_token") if not request_cookie: future.set_exception(AuthError( "Missing OAuth request token cookie")) return self.clear_cookie("_oauth_request_token") cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] if cookie_key != request_key: future.set_exception(AuthError( "Request token does not match cookie")) return token = dict(key=cookie_key, secret=cookie_secret) if oauth_verifier: token["verifier"] = oauth_verifier if http_client is None: http_client = self.get_auth_http_client() http_client.fetch(self._oauth_access_token_url(token), functools.partial(self._on_access_token, callback)) def _oauth_request_token_url(self, callback_uri=None, extra_params=None): consumer_token = self._oauth_consumer_token() url = self._OAUTH_REQUEST_TOKEN_URL args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), oauth_version="1.0", ) if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": if callback_uri == "oob": args["oauth_callback"] = "oob" elif callback_uri: args["oauth_callback"] = urlparse.urljoin( self.request.full_url(), callback_uri) if extra_params: args.update(extra_params) signature = _oauth10a_signature(consumer_token, "GET", url, args) else: signature = _oauth_signature(consumer_token, "GET", url, args) args["oauth_signature"] = signature return url + "?" + urllib_parse.urlencode(args) def _on_request_token(self, authorize_url, callback_uri, callback, response): if response.error: raise Exception("Could not get request token: %s" % response.error) request_token = _oauth_parse_response(response.body) data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" + base64.b64encode(escape.utf8(request_token["secret"]))) self.set_cookie("_oauth_request_token", data) args = dict(oauth_token=request_token["key"]) if callback_uri == "oob": self.finish(authorize_url + "?" + urllib_parse.urlencode(args)) callback() return elif callback_uri: args["oauth_callback"] = urlparse.urljoin( self.request.full_url(), callback_uri) self.redirect(authorize_url + "?" 
+ urllib_parse.urlencode(args)) callback() def _oauth_access_token_url(self, request_token): consumer_token = self._oauth_consumer_token() url = self._OAUTH_ACCESS_TOKEN_URL args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_token=escape.to_basestring(request_token["key"]), oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), oauth_version="1.0", ) if "verifier" in request_token: args["oauth_verifier"] = request_token["verifier"] if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": signature = _oauth10a_signature(consumer_token, "GET", url, args, request_token) else: signature = _oauth_signature(consumer_token, "GET", url, args, request_token) args["oauth_signature"] = signature return url + "?" + urllib_parse.urlencode(args) def _on_access_token(self, future, response): if response.error: future.set_exception(AuthError("Could not fetch access token")) return access_token = _oauth_parse_response(response.body) self._oauth_get_user_future(access_token).add_done_callback( functools.partial(self._on_oauth_get_user, access_token, future)) def _oauth_consumer_token(self): """Subclasses must override this to return their OAuth consumer keys. The return value should be a `dict` with keys ``key`` and ``secret``. """ raise NotImplementedError() @return_future def _oauth_get_user_future(self, access_token, callback): """Subclasses must override this to get basic information about the user. Should return a `.Future` whose result is a dictionary containing information about the user, which may have been retrieved by using ``access_token`` to make a request to the service. The access token will be added to the returned dictionary to make the result of `get_authenticated_user`. For backwards compatibility, the callback-based ``_oauth_get_user`` method is also supported. """ # By default, call the old-style _oauth_get_user, but new code # should override this method instead. self._oauth_get_user(access_token, callback) def _oauth_get_user(self, access_token, callback): raise NotImplementedError() def _on_oauth_get_user(self, access_token, future, user_future): if user_future.exception() is not None: future.set_exception(user_future.exception()) return user = user_future.result() if not user: future.set_exception(AuthError("Error getting user")) return user["access_token"] = access_token future.set_result(user) def _oauth_request_parameters(self, url, access_token, parameters={}, method="GET"): """Returns the OAuth parameters as a dict for the given request. parameters should include all POST arguments and query string arguments that will be sent with the request. """ consumer_token = self._oauth_consumer_token() base_args = dict( oauth_consumer_key=escape.to_basestring(consumer_token["key"]), oauth_token=escape.to_basestring(access_token["key"]), oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), oauth_version="1.0", ) args = {} args.update(base_args) args.update(parameters) if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": signature = _oauth10a_signature(consumer_token, method, url, args, access_token) else: signature = _oauth_signature(consumer_token, method, url, args, access_token) base_args["oauth_signature"] = escape.to_basestring(signature) return base_args def get_auth_http_client(self): """Returns the `.AsyncHTTPClient` instance to be used for auth requests. 
        May be overridden by subclasses to use an HTTP client other than
        the default.
        """
        return httpclient.AsyncHTTPClient()


class OAuth2Mixin(object):
    """Abstract implementation of OAuth 2.0.

    See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example
    implementations.

    Class attributes:

    * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
    * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
    """
    @return_future
    def authorize_redirect(self, redirect_uri=None, client_id=None,
                           client_secret=None, extra_params=None,
                           callback=None, scope=None, response_type="code"):
        """Redirects the user to obtain OAuth authorization for this service.

        Some providers require that you register a redirect URL with
        your application instead of passing one via this method. You
        should call this method to log the user in, and then call
        ``get_authenticated_user`` in the handler for your
        redirect URL to complete the authorization process.

        .. versionchanged:: 3.1
           Returns a `.Future` and takes an optional callback.  These are
           not strictly necessary as this method is synchronous,
           but they are supplied for consistency with
           `OAuthMixin.authorize_redirect`.
        """
        args = {
            "redirect_uri": redirect_uri,
            "client_id": client_id,
            "response_type": response_type
        }
        if extra_params:
            args.update(extra_params)
        if scope:
            args['scope'] = ' '.join(scope)
        self.redirect(
            url_concat(self._OAUTH_AUTHORIZE_URL, args))
        callback()

    def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
                                 client_secret=None, code=None,
                                 extra_params=None):
        url = self._OAUTH_ACCESS_TOKEN_URL
        args = dict(
            redirect_uri=redirect_uri,
            code=code,
            client_id=client_id,
            client_secret=client_secret,
        )
        if extra_params:
            args.update(extra_params)
        return url_concat(url, args)

    @_auth_return_future
    def oauth2_request(self, url, callback, access_token=None,
                       post_args=None, **args):
        """Fetches the given URL with an OAuth2 access token.

        If the request is a POST, ``post_args`` should be provided. Query
        string arguments should be given as keyword arguments.

        Example usage:

        .. testcode::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookGraphMixin):
                @tornado.web.authenticated
                @tornado.gen.coroutine
                def get(self):
                    new_entry = yield self.oauth2_request(
                        "https://graph.facebook.com/me/feed",
                        post_args={"message": "I am posting from my Tornado application!"},
                        access_token=self.current_user["access_token"])

                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        yield self.authorize_redirect()
                        return
                    self.finish("Posted a message!")

        .. testoutput::
           :hide:

        .. versionadded:: 4.3
        """
        all_args = {}
        if access_token:
            all_args["access_token"] = access_token
            all_args.update(args)
        if all_args:
            url += "?" + urllib_parse.urlencode(all_args)
        callback = functools.partial(self._on_oauth2_request, callback)
        http = self.get_auth_http_client()
        if post_args is not None:
            http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
                       callback=callback)
        else:
            http.fetch(url, callback=callback)

    def _on_oauth2_request(self, future, response):
        if response.error:
            future.set_exception(AuthError("Error response %s fetching %s" %
                                           (response.error, response.request.url)))
            return
        future.set_result(escape.json_decode(response.body))

    def get_auth_http_client(self):
        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.

        May be overridden by subclasses to use an HTTP client other than
        the default.

        .. versionadded:: 4.3
        """
        return httpclient.AsyncHTTPClient()


class TwitterMixin(OAuthMixin):
    """Twitter OAuth authentication.
To authenticate with Twitter, register your application with Twitter at http://twitter.com/apps. Then copy your Consumer Key and Consumer Secret to the application `~tornado.web.Application.settings` ``twitter_consumer_key`` and ``twitter_consumer_secret``. Use this mixin on the handler for the URL you registered as your application's callback URL. When your application is set up, you can use this mixin like this to authenticate the user with Twitter and get access to their stream: .. testcode:: class TwitterLoginHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): @tornado.gen.coroutine def get(self): if self.get_argument("oauth_token", None): user = yield self.get_authenticated_user() # Save the user using e.g. set_secure_cookie() else: yield self.authorize_redirect() .. testoutput:: :hide: The user object returned by `~OAuthMixin.get_authenticated_user` includes the attributes ``username``, ``name``, ``access_token``, and all of the custom Twitter user attributes described at https://dev.twitter.com/docs/api/1.1/get/users/show """ _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token" _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token" _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize" _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate" _OAUTH_NO_CALLBACKS = False _TWITTER_BASE_URL = "https://api.twitter.com/1.1" @return_future def authenticate_redirect(self, callback_uri=None, callback=None): """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. """ http = self.get_auth_http_client() http.fetch(self._oauth_request_token_url(callback_uri=callback_uri), functools.partial( self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None, callback)) @_auth_return_future def twitter_request(self, path, callback=None, access_token=None, post_args=None, **args): """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor`` The path should not include the format or API version number. (we automatically use JSON format and API version 1). If the request is a POST, ``post_args`` should be provided. Query string arguments should be given as keyword arguments. All the Twitter methods are documented at http://dev.twitter.com/ Many methods require an OAuth access token which you can obtain through `~OAuthMixin.authorize_redirect` and `~OAuthMixin.get_authenticated_user`. The user returned through that process includes an 'access_token' attribute that can be used to make authenticated requests via this method. Example usage: .. testcode:: class MainHandler(tornado.web.RequestHandler, tornado.auth.TwitterMixin): @tornado.web.authenticated @tornado.gen.coroutine def get(self): new_entry = yield self.twitter_request( "/statuses/update", post_args={"status": "Testing Tornado Web Server"}, access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? yield self.authorize_redirect() return self.finish("Posted a message!") .. testoutput:: :hide: """ if path.startswith('http:') or path.startswith('https:'): # Raw urls are useful for e.g. 
search which doesn't follow the # usual pattern: http://search.twitter.com/search.json url = path else: url = self._TWITTER_BASE_URL + path + ".json" # Add the OAuth resource request signature if we have credentials if access_token: all_args = {} all_args.update(args) all_args.update(post_args or {}) method = "POST" if post_args is not None else "GET" oauth = self._oauth_request_parameters( url, access_token, all_args, method=method) args.update(oauth) if args: url += "?" + urllib_parse.urlencode(args) http = self.get_auth_http_client() http_callback = functools.partial(self._on_twitter_request, callback) if post_args is not None: http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args), callback=http_callback) else: http.fetch(url, callback=http_callback) def _on_twitter_request(self, future, response): if response.error: future.set_exception(AuthError( "Error response %s fetching %s" % (response.error, response.request.url))) return future.set_result(escape.json_decode(response.body)) def _oauth_consumer_token(self): self.require_setting("twitter_consumer_key", "Twitter OAuth") self.require_setting("twitter_consumer_secret", "Twitter OAuth") return dict( key=self.settings["twitter_consumer_key"], secret=self.settings["twitter_consumer_secret"]) @gen.coroutine def _oauth_get_user_future(self, access_token): user = yield self.twitter_request( "/account/verify_credentials", access_token=access_token) if user: user["username"] = user["screen_name"] raise gen.Return(user) class GoogleOAuth2Mixin(OAuth2Mixin): """Google authentication using OAuth2. In order to use, register your application with Google and copy the relevant parameters to your application settings. * Go to the Google Dev Console at http://console.developers.google.com * Select a project, or create a new one. * In the sidebar on the left, select APIs & Auth. * In the list of APIs, find the Google+ API service and set it to ON. * In the sidebar on the left, select Credentials. * In the OAuth section of the page, select Create New Client ID. * Set the Redirect URI to point to your auth handler * Copy the "Client secret" and "Client ID" to the application settings as {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}} .. versionadded:: 3.2 """ _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth" _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token" _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo" _OAUTH_NO_CALLBACKS = False _OAUTH_SETTINGS_KEY = 'google_oauth' @_auth_return_future def get_authenticated_user(self, redirect_uri, code, callback): """Handles the login for the Google user, returning an access token. The result is a dictionary containing an ``access_token`` field ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)). Unlike other ``get_authenticated_user`` methods in this package, this method does not return any additional information about the user. The returned access token can be used with `OAuth2Mixin.oauth2_request` to request additional information (perhaps from ``https://www.googleapis.com/oauth2/v2/userinfo``) Example usage: .. 
testcode:: class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): @tornado.gen.coroutine def get(self): if self.get_argument('code', False): access = yield self.get_authenticated_user( redirect_uri='http://your.site.com/auth/google', code=self.get_argument('code')) user = yield self.oauth2_request( "https://www.googleapis.com/oauth2/v1/userinfo", access_token=access["access_token"]) # Save the user and access token with # e.g. set_secure_cookie. else: yield self.authorize_redirect( redirect_uri='http://your.site.com/auth/google', client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) .. testoutput:: :hide: """ http = self.get_auth_http_client() body = urllib_parse.urlencode({ "redirect_uri": redirect_uri, "code": code, "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'], "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'], "grant_type": "authorization_code", }) http.fetch(self._OAUTH_ACCESS_TOKEN_URL, functools.partial(self._on_access_token, callback), method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body) def _on_access_token(self, future, response): """Callback function for the exchange to the access token.""" if response.error: future.set_exception(AuthError('Google auth error: %s' % str(response))) return args = escape.json_decode(response.body) future.set_result(args) class FacebookGraphMixin(OAuth2Mixin): """Facebook authentication using the new Graph API and OAuth2.""" _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?" _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?" _OAUTH_NO_CALLBACKS = False _FACEBOOK_BASE_URL = "https://graph.facebook.com" @_auth_return_future def get_authenticated_user(self, redirect_uri, client_id, client_secret, code, callback, extra_fields=None): """Handles the login for the Facebook user, returning a user object. Example usage: .. testcode:: class FacebookGraphLoginHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): @tornado.gen.coroutine def get(self): if self.get_argument("code", False): user = yield self.get_authenticated_user( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], client_secret=self.settings["facebook_secret"], code=self.get_argument("code")) # Save the user with e.g. set_secure_cookie else: yield self.authorize_redirect( redirect_uri='/auth/facebookgraph/', client_id=self.settings["facebook_api_key"], extra_params={"scope": "read_stream,offline_access"}) .. testoutput:: :hide: This method returns a dictionary which may contain the following fields: * ``access_token``, a string which may be passed to `facebook_request` * ``session_expires``, an integer encoded as a string representing the time until the access token expires in seconds. This field should be used like ``int(user['session_expires'])``; in a future version of Tornado it will change from a string to an integer. * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``, ``link``, plus any fields named in the ``extra_fields`` argument. These fields are copied from the Facebook graph API `user object `_ .. versionchanged:: 4.5 The ``session_expires`` field was updated to support changes made to the Facebook API in March 2017. 
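        A small, hedged sketch of reading that field defensively (the helper
        name is an assumption made for the example, not part of this API):

        .. testcode::

            def session_seconds_remaining(user):
                # ``session_expires`` is currently a string; cast explicitly
                # so the code keeps working if it becomes an int in Tornado 5.
                return int(user["session_expires"])

        .. testoutput::
           :hide: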
""" http = self.get_auth_http_client() args = { "redirect_uri": redirect_uri, "code": code, "client_id": client_id, "client_secret": client_secret, } fields = set(['id', 'name', 'first_name', 'last_name', 'locale', 'picture', 'link']) if extra_fields: fields.update(extra_fields) http.fetch(self._oauth_request_token_url(**args), functools.partial(self._on_access_token, redirect_uri, client_id, client_secret, callback, fields)) def _on_access_token(self, redirect_uri, client_id, client_secret, future, fields, response): if response.error: future.set_exception(AuthError('Facebook auth error: %s' % str(response))) return args = escape.json_decode(response.body) session = { "access_token": args.get("access_token"), "expires_in": args.get("expires_in") } self.facebook_request( path="/me", callback=functools.partial( self._on_get_user_info, future, session, fields), access_token=session["access_token"], appsecret_proof=hmac.new(key=client_secret.encode('utf8'), msg=session["access_token"].encode('utf8'), digestmod=hashlib.sha256).hexdigest(), fields=",".join(fields) ) def _on_get_user_info(self, future, session, fields, user): if user is None: future.set_result(None) return fieldmap = {} for field in fields: fieldmap[field] = user.get(field) # session_expires is converted to str for compatibility with # older versions in which the server used url-encoding and # this code simply returned the string verbatim. # This should change in Tornado 5.0. fieldmap.update({"access_token": session["access_token"], "session_expires": str(session.get("expires_in"))}) future.set_result(fieldmap) @_auth_return_future def facebook_request(self, path, callback, access_token=None, post_args=None, **args): """Fetches the given relative API path, e.g., "/btaylor/picture" If the request is a POST, ``post_args`` should be provided. Query string arguments should be given as keyword arguments. An introduction to the Facebook Graph API can be found at http://developers.facebook.com/docs/api Many methods require an OAuth access token which you can obtain through `~OAuth2Mixin.authorize_redirect` and `get_authenticated_user`. The user returned through that process includes an ``access_token`` attribute that can be used to make authenticated requests via this method. Example usage: ..testcode:: class MainHandler(tornado.web.RequestHandler, tornado.auth.FacebookGraphMixin): @tornado.web.authenticated @tornado.gen.coroutine def get(self): new_entry = yield self.facebook_request( "/me/feed", post_args={"message": "I am posting from my Tornado application!"}, access_token=self.current_user["access_token"]) if not new_entry: # Call failed; perhaps missing permission? yield self.authorize_redirect() return self.finish("Posted a message!") .. testoutput:: :hide: The given path is relative to ``self._FACEBOOK_BASE_URL``, by default "https://graph.facebook.com". This method is a wrapper around `OAuth2Mixin.oauth2_request`; the only difference is that this method takes a relative path, while ``oauth2_request`` takes a complete url. .. versionchanged:: 3.1 Added the ability to override ``self._FACEBOOK_BASE_URL``. """ url = self._FACEBOOK_BASE_URL + path # Thanks to the _auth_return_future decorator, our "callback" # argument is a Future, which we cannot pass as a callback to # oauth2_request. Instead, have oauth2_request return a # future and chain them together. 
oauth_future = self.oauth2_request(url, access_token=access_token, post_args=post_args, **args) chain_future(oauth_future, callback) def _oauth_signature(consumer_token, method, url, parameters={}, token=None): """Calculates the HMAC-SHA1 OAuth signature for the given request. See http://oauth.net/core/1.0/#signing_process """ parts = urlparse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()))) base_string = "&".join(_oauth_escape(e) for e in base_elems) key_elems = [escape.utf8(consumer_token["secret"])] key_elems.append(escape.utf8(token["secret"] if token else "")) key = b"&".join(key_elems) hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) return binascii.b2a_base64(hash.digest())[:-1] def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None): """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request. See http://oauth.net/core/1.0a/#signing_process """ parts = urlparse.urlparse(url) scheme, netloc, path = parts[:3] normalized_url = scheme.lower() + "://" + netloc.lower() + path base_elems = [] base_elems.append(method.upper()) base_elems.append(normalized_url) base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items()))) base_string = "&".join(_oauth_escape(e) for e in base_elems) key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))] key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else "")) key = b"&".join(key_elems) hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) return binascii.b2a_base64(hash.digest())[:-1] def _oauth_escape(val): if isinstance(val, unicode_type): val = val.encode("utf-8") return urllib_parse.quote(val, safe="~") def _oauth_parse_response(body): # I can't find an officially-defined encoding for oauth responses and # have never seen anyone use non-ascii. Leave the response in a byte # string for python 2, and use utf8 on python 3. body = escape.native_str(body) p = urlparse.parse_qs(body, keep_blank_values=False) token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) # Add the extra parameters the Provider included to the token special = ("oauth_token", "oauth_token_secret") token.update((k, p[k][0]) for k in p if k not in special) return token tornado-4.5.3/tornado/autoreload.py000066400000000000000000000302661322420601000173450ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Automatically restart the server when a source file is modified. Most applications should not access this module directly. Instead, pass the keyword argument ``autoreload=True`` to the `tornado.web.Application` constructor (or ``debug=True``, which enables this setting and several others). 
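For example, a minimal sketch (the handler and the port are placeholders)::

    import tornado.ioloop
    import tornado.web

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello")

    app = tornado.web.Application([(r"/", MainHandler)], autoreload=True)
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()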
This will enable autoreload mode as well as checking for changes to templates and static resources. Note that restarting is a destructive operation and any requests in progress will be aborted when the process restarts. (If you want to disable autoreload while using other debug-mode features, pass both ``debug=True`` and ``autoreload=False``). This module can also be used as a command-line wrapper around scripts such as unit test runners. See the `main` method for details. The command-line wrapper and Application debug modes can be used together. This combination is encouraged as the wrapper catches syntax errors and other import-time failures, while debug mode catches changes once the server has started. This module depends on `.IOLoop`, so it will not work in WSGI applications and Google App Engine. It also will not work correctly when `.HTTPServer`'s multi-process mode is used. Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) because it re-executes Python using ``sys.executable`` and ``sys.argv``. Additionally, modifying these variables will cause reloading to behave incorrectly. """ from __future__ import absolute_import, division, print_function import os import sys # sys.path handling # ----------------- # # If a module is run with "python -m", the current directory (i.e. "") # is automatically prepended to sys.path, but not if it is run as # "path/to/file.py". The processing for "-m" rewrites the former to # the latter, so subsequent executions won't have the same path as the # original. # # Conversely, when run as path/to/file.py, the directory containing # file.py gets added to the path, which can cause confusion as imports # may become relative in spite of the future import. # # We address the former problem by setting the $PYTHONPATH environment # variable before re-execution so the new process will see the correct # path. We attempt to address the latter problem when tornado.autoreload # is run as __main__, although we can't fix the general case because # we cannot reliably reconstruct the original command line # (http://bugs.python.org/issue14208). if __name__ == "__main__": # This sys.path manipulation must come before our imports (as much # as possible - if we introduced a tornado.sys or tornado.os # module we'd be in trouble), or else our imports would become # relative again despite the future import. # # There is a separate __main__ block at the end of the file to call main(). if sys.path[0] == os.path.dirname(__file__): del sys.path[0] import functools import logging import os import pkgutil # type: ignore import sys import traceback import types import subprocess import weakref from tornado import ioloop from tornado.log import gen_log from tornado import process from tornado.util import exec_in try: import signal except ImportError: signal = None # os.execv is broken on Windows and can't properly parse command line # arguments and executable name if they contain whitespaces. subprocess # fixes that behavior. _has_execv = sys.platform != 'win32' _watched_files = set() _reload_hooks = [] _reload_attempted = False _io_loops = weakref.WeakKeyDictionary() # type: ignore def start(io_loop=None, check_time=500): """Begins watching source files for changes. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. 
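    A minimal sketch of calling this function directly, rather than via
    ``Application(debug=True)`` (the extra watched file is a hypothetical
    example)::

        from tornado import autoreload, ioloop

        autoreload.start()                     # watch all imported modules
        autoreload.watch("myapp/config.yaml")  # hypothetical non-Python file
        # ioloop.IOLoop.current().start()      # then run the loop as usual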
""" io_loop = io_loop or ioloop.IOLoop.current() if io_loop in _io_loops: return _io_loops[io_loop] = True if len(_io_loops) > 1: gen_log.warning("tornado.autoreload started more than once in the same process") modify_times = {} callback = functools.partial(_reload_on_update, modify_times) scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop) scheduler.start() def wait(): """Wait for a watched file to change, then restart the process. Intended to be used at the end of scripts like unit test runners, to run the tests again after any source file changes (but see also the command-line interface in `main`) """ io_loop = ioloop.IOLoop() start(io_loop) io_loop.start() def watch(filename): """Add a file to the watch list. All imported modules are watched by default. """ _watched_files.add(filename) def add_reload_hook(fn): """Add a function to be called before reloading the process. Note that for open file and socket handles it is generally preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or ``tornado.platform.auto.set_close_exec``) instead of using a reload hook to close them. """ _reload_hooks.append(fn) def _reload_on_update(modify_times): if _reload_attempted: # We already tried to reload and it didn't work, so don't try again. return if process.task_id() is not None: # We're in a child process created by fork_processes. If child # processes restarted themselves, they'd all restart and then # all call fork_processes again. return for module in list(sys.modules.values()): # Some modules play games with sys.modules (e.g. email/__init__.py # in the standard library), and occasionally this can cause strange # failures in getattr. Just ignore anything that's not an ordinary # module. if not isinstance(module, types.ModuleType): continue path = getattr(module, "__file__", None) if not path: continue if path.endswith(".pyc") or path.endswith(".pyo"): path = path[:-1] _check_file(modify_times, path) for path in _watched_files: _check_file(modify_times, path) def _check_file(modify_times, path): try: modified = os.stat(path).st_mtime except Exception: return if path not in modify_times: modify_times[path] = modified return if modify_times[path] != modified: gen_log.info("%s modified; restarting server", path) _reload() def _reload(): global _reload_attempted _reload_attempted = True for fn in _reload_hooks: fn() if hasattr(signal, "setitimer"): # Clear the alarm signal set by # ioloop.set_blocking_log_threshold so it doesn't fire # after the exec. signal.setitimer(signal.ITIMER_REAL, 0, 0) # sys.path fixes: see comments at top of file. If sys.path[0] is an empty # string, we were (probably) invoked with -m and the effective path # is about to change on re-exec. Add the current directory to $PYTHONPATH # to ensure that the new process sees the same path we did. path_prefix = '.' + os.pathsep if (sys.path[0] == '' and not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): os.environ["PYTHONPATH"] = (path_prefix + os.environ.get("PYTHONPATH", "")) if not _has_execv: subprocess.Popen([sys.executable] + sys.argv) sys.exit(0) else: try: os.execv(sys.executable, [sys.executable] + sys.argv) except OSError: # Mac OS X versions prior to 10.6 do not support execv in # a process that contains multiple threads. Instead of # re-executing in the current process, start a new one # and cause the current process to exit. 
This isn't # ideal since the new process is detached from the parent # terminal and thus cannot easily be killed with ctrl-C, # but it's better than not being able to autoreload at # all. # Unfortunately the errno returned in this case does not # appear to be consistent, so we can't easily check for # this error specifically. os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + sys.argv) # At this point the IOLoop has been closed and finally # blocks will experience errors if we allow the stack to # unwind, so just exit uncleanly. os._exit(0) _USAGE = """\ Usage: python -m tornado.autoreload -m module.to.run [args...] python -m tornado.autoreload path/to/script.py [args...] """ def main(): """Command-line wrapper to re-run a script whenever its source changes. Scripts may be specified by filename or module name:: python -m tornado.autoreload -m tornado.test.runtests python -m tornado.autoreload tornado/test/runtests.py Running a script with this wrapper is similar to calling `tornado.autoreload.wait` at the end of the script, but this wrapper can catch import-time problems like syntax errors that would otherwise prevent the script from reaching its call to `wait`. """ original_argv = sys.argv sys.argv = sys.argv[:] if len(sys.argv) >= 3 and sys.argv[1] == "-m": mode = "module" module = sys.argv[2] del sys.argv[1:3] elif len(sys.argv) >= 2: mode = "script" script = sys.argv[1] sys.argv = sys.argv[1:] else: print(_USAGE, file=sys.stderr) sys.exit(1) try: if mode == "module": import runpy runpy.run_module(module, run_name="__main__", alter_sys=True) elif mode == "script": with open(script) as f: # Execute the script in our namespace instead of creating # a new one so that something that tries to import __main__ # (e.g. the unittest module) will see names defined in the # script instead of just those defined in this module. global __file__ __file__ = script # If __package__ is defined, imports may be incorrectly # interpreted as relative to this module. global __package__ del __package__ exec_in(f.read(), globals(), globals()) except SystemExit as e: logging.basicConfig() gen_log.info("Script exited with status %s", e.code) except Exception as e: logging.basicConfig() gen_log.warning("Script exited with uncaught exception", exc_info=True) # If an exception occurred at import time, the file with the error # never made it into sys.modules and so we won't know to watch it. # Just to make sure we've covered everything, walk the stack trace # from the exception and watch every file. for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]): watch(filename) if isinstance(e, SyntaxError): # SyntaxErrors are special: their innermost stack frame is fake # so extract_tb won't see it and we have to get the filename # from the exception object. watch(e.filename) else: logging.basicConfig() gen_log.info("Script exited normally") # restore sys.argv so subsequent executions will include autoreload sys.argv = original_argv if mode == 'module': # runpy did a fake import of the module as __main__, but now it's # no longer in sys.modules. Figure out where it is and watch it. 
loader = pkgutil.get_loader(module) if loader is not None: watch(loader.get_filename()) wait() if __name__ == "__main__": # See also the other __main__ block at the top of the file, which modifies # sys.path before our imports main() tornado-4.5.3/tornado/concurrent.py000066400000000000000000000446221322420601000173710ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities for working with threads and ``Futures``. ``Futures`` are a pattern for concurrent programming introduced in Python 3.2 in the `concurrent.futures` package. This package defines a mostly-compatible `Future` class designed for use from coroutines, as well as some utility functions for interacting with the `concurrent.futures` package. """ from __future__ import absolute_import, division, print_function import functools import platform import textwrap import traceback import sys from tornado.log import app_log from tornado.stack_context import ExceptionStackContext, wrap from tornado.util import raise_exc_info, ArgReplacer, is_finalizing try: from concurrent import futures except ImportError: futures = None try: import typing except ImportError: typing = None # Can the garbage collector handle cycles that include __del__ methods? # This is true in cpython beginning with version 3.4 (PEP 442). _GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and sys.version_info >= (3, 4)) class ReturnValueIgnoredError(Exception): pass # This class and associated code in the future object is derived # from the Trollius project, a backport of asyncio to Python 2.x - 3.x class _TracebackLogger(object): """Helper to log a traceback upon destruction if not cleared. This solves a nasty problem with Futures and Tasks that have an exception set: if nobody asks for the exception, the exception is never logged. This violates the Zen of Python: 'Errors should never pass silently. Unless explicitly silenced.' However, we don't want to log the exception as soon as set_exception() is called: if the calling code is written properly, it will get the exception and handle it properly. But we *do* want to log it if result() or exception() was never called -- otherwise developers waste a lot of time wondering why their buggy code fails silently. An earlier attempt added a __del__() method to the Future class itself, but this backfired because the presence of __del__() prevents garbage collection from breaking cycles. A way out of this catch-22 is to avoid having a __del__() method on the Future class itself, but instead to have a reference to a helper object with a __del__() method that logs the traceback, where we ensure that the helper object doesn't participate in cycles, and only the Future has a reference to it. The helper object is added when set_exception() is called. When the Future is collected, and the helper is present, the helper object is also collected, and its __del__() method will log the traceback. 
    When the Future's result() or exception() method is called (and a
    helper object is present), it removes the helper object, after
    calling its clear() method to prevent it from logging.

    One downside is that we do a fair amount of work to extract the
    traceback from the exception, even when it is never logged.  It would
    seem cheaper to just store the exception object, but that references
    the traceback, which references stack frames, which may reference the
    Future, which references the _TracebackLogger, and then the
    _TracebackLogger would be included in a cycle, which is what we're
    trying to avoid!  As an optimization, we don't immediately format the
    exception; we only do that work when activate() is called, a call
    that is delayed until after all the Future's callbacks have run.
    Since a Future usually has at least one callback (typically set by
    'yield From'), and that callback usually extracts the exception,
    the formatting work is usually avoided entirely.

    PS. I don't claim credit for this solution.  I first heard of it
    in a discussion about closing files when they are collected.
    """

    __slots__ = ('exc_info', 'formatted_tb')

    def __init__(self, exc_info):
        self.exc_info = exc_info
        self.formatted_tb = None

    def activate(self):
        exc_info = self.exc_info
        if exc_info is not None:
            self.exc_info = None
            self.formatted_tb = traceback.format_exception(*exc_info)

    def clear(self):
        self.exc_info = None
        self.formatted_tb = None

    def __del__(self, is_finalizing=is_finalizing):
        if not is_finalizing() and self.formatted_tb:
            app_log.error('Future exception was never retrieved: %s',
                          ''.join(self.formatted_tb).rstrip())


class Future(object):
    """Placeholder for an asynchronous result.

    A ``Future`` encapsulates the result of an asynchronous
    operation.  In synchronous applications ``Futures`` are used
    to wait for the result from a thread or process pool; in
    Tornado they are normally used with `.IOLoop.add_future` or by
    yielding them in a `.gen.coroutine`.

    `tornado.concurrent.Future` is similar to
    `concurrent.futures.Future`, but not thread-safe (and therefore
    faster for use with single-threaded event loops).

    In addition to ``exception`` and ``set_exception``, methods ``exc_info``
    and ``set_exc_info`` are supported to capture tracebacks in Python 2.
    The traceback is automatically available in Python 3, but in the
    Python 2 futures backport this information is discarded.
    This functionality was previously available in a separate class
    ``TracebackFuture``, which is now a deprecated alias for this class.

    .. versionchanged:: 4.0
       `tornado.concurrent.Future` is always a thread-unsafe ``Future``
       with support for the ``exc_info`` methods.  Previously it would
       be an alias for the thread-safe `concurrent.futures.Future`
       if that package was available and fall back to the thread-unsafe
       implementation if it was not.

    .. versionchanged:: 4.1
       If a `.Future` contains an error but that error is never observed
       (by calling ``result()``, ``exception()``, or ``exc_info()``),
       a stack trace will be logged when the `.Future` is
       garbage collected.  This normally indicates an error in the
       application, but in cases where it results in undesired logging
       it may be necessary to suppress the logging by ensuring that the
       exception is observed: ``f.add_done_callback(lambda f: f.exception())``.
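    A minimal usage sketch, independent of any IOLoop (the names below are
    chosen for the example):

    .. testcode::

        from tornado.concurrent import Future

        def on_done(f):
            print(f.result())

        fut = Future()
        fut.add_done_callback(on_done)
        fut.set_result(42)  # the future is now done, so on_done runs at once

    .. testoutput::

        42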
""" def __init__(self): self._done = False self._result = None self._exc_info = None self._log_traceback = False # Used for Python >= 3.4 self._tb_logger = None # Used for Python <= 3.3 self._callbacks = [] # Implement the Python 3.5 Awaitable protocol if possible # (we can't use return and yield together until py33). if sys.version_info >= (3, 3): exec(textwrap.dedent(""" def __await__(self): return (yield self) """)) else: # Py2-compatible version for use with cython. def __await__(self): result = yield self # StopIteration doesn't take args before py33, # but Cython recognizes the args tuple. e = StopIteration() e.args = (result,) raise e def cancel(self): """Cancel the operation, if possible. Tornado ``Futures`` do not support cancellation, so this method always returns False. """ return False def cancelled(self): """Returns True if the operation has been cancelled. Tornado ``Futures`` do not support cancellation, so this method always returns False. """ return False def running(self): """Returns True if this operation is currently running.""" return not self._done def done(self): """Returns True if the future has finished running.""" return self._done def _clear_tb_log(self): self._log_traceback = False if self._tb_logger is not None: self._tb_logger.clear() self._tb_logger = None def result(self, timeout=None): """If the operation succeeded, return its result. If it failed, re-raise its exception. This method takes a ``timeout`` argument for compatibility with `concurrent.futures.Future` but it is an error to call it before the `Future` is done, so the ``timeout`` is never used. """ self._clear_tb_log() if self._result is not None: return self._result if self._exc_info is not None: try: raise_exc_info(self._exc_info) finally: self = None self._check_done() return self._result def exception(self, timeout=None): """If the operation raised an exception, return the `Exception` object. Otherwise returns None. This method takes a ``timeout`` argument for compatibility with `concurrent.futures.Future` but it is an error to call it before the `Future` is done, so the ``timeout`` is never used. """ self._clear_tb_log() if self._exc_info is not None: return self._exc_info[1] else: self._check_done() return None def add_done_callback(self, fn): """Attaches the given callback to the `Future`. It will be invoked with the `Future` as its argument when the Future has finished running and its result is available. In Tornado consider using `.IOLoop.add_future` instead of calling `add_done_callback` directly. """ if self._done: fn(self) else: self._callbacks.append(fn) def set_result(self, result): """Sets the result of a ``Future``. It is undefined to call any of the ``set`` methods more than once on the same object. """ self._result = result self._set_done() def set_exception(self, exception): """Sets the exception of a ``Future.``""" self.set_exc_info( (exception.__class__, exception, getattr(exception, '__traceback__', None))) def exc_info(self): """Returns a tuple in the same format as `sys.exc_info` or None. .. versionadded:: 4.0 """ self._clear_tb_log() return self._exc_info def set_exc_info(self, exc_info): """Sets the exception information of a ``Future.`` Preserves tracebacks on Python 2. .. versionadded:: 4.0 """ self._exc_info = exc_info self._log_traceback = True if not _GC_CYCLE_FINALIZERS: self._tb_logger = _TracebackLogger(exc_info) try: self._set_done() finally: # Activate the logger after all callbacks have had a # chance to call result() or exception(). 
            if self._log_traceback and self._tb_logger is not None:
                self._tb_logger.activate()
        self._exc_info = exc_info

    def _check_done(self):
        if not self._done:
            raise Exception("DummyFuture does not support blocking for results")

    def _set_done(self):
        self._done = True
        for cb in self._callbacks:
            try:
                cb(self)
            except Exception:
                app_log.exception('Exception in callback %r for %r',
                                  cb, self)
        self._callbacks = None

    # On Python 3.3 or older, objects with a destructor that are part of a
    # reference cycle are never destroyed.  This is no longer the case on
    # Python 3.4, thanks to PEP 442.
    if _GC_CYCLE_FINALIZERS:
        def __del__(self, is_finalizing=is_finalizing):
            if is_finalizing() or not self._log_traceback:
                # set_exception() was not called, or result() or exception()
                # has consumed the exception
                return

            tb = traceback.format_exception(*self._exc_info)

            app_log.error('Future %r exception was never retrieved: %s',
                          self, ''.join(tb).rstrip())


TracebackFuture = Future

if futures is None:
    FUTURES = Future  # type: typing.Union[type, typing.Tuple[type, ...]]
else:
    FUTURES = (futures.Future, Future)


def is_future(x):
    return isinstance(x, FUTURES)


class DummyExecutor(object):
    def submit(self, fn, *args, **kwargs):
        future = TracebackFuture()
        try:
            future.set_result(fn(*args, **kwargs))
        except Exception:
            future.set_exc_info(sys.exc_info())
        return future

    def shutdown(self, wait=True):
        pass


dummy_executor = DummyExecutor()


def run_on_executor(*args, **kwargs):
    """Decorator to run a synchronous method asynchronously on an executor.

    The decorated method may be called with a ``callback`` keyword
    argument and returns a future.

    The `.IOLoop` and executor to be used are determined by the ``io_loop``
    and ``executor`` attributes of ``self``. To use different attributes,
    pass keyword arguments to the decorator::

        @run_on_executor(executor='_thread_pool')
        def foo(self):
            pass

    .. versionchanged:: 4.2
       Added keyword arguments to use alternative attributes.
    """
    def run_on_executor_decorator(fn):
        executor = kwargs.get("executor", "executor")
        io_loop = kwargs.get("io_loop", "io_loop")

        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            callback = kwargs.pop("callback", None)
            future = getattr(self, executor).submit(fn, self, *args, **kwargs)
            if callback:
                getattr(self, io_loop).add_future(
                    future, lambda future: callback(future.result()))
            return future
        return wrapper
    if args and kwargs:
        raise ValueError("cannot combine positional and keyword args")
    if len(args) == 1:
        return run_on_executor_decorator(args[0])
    elif len(args) != 0:
        raise ValueError("expected 1 argument, got %d" % len(args))
    return run_on_executor_decorator


_NO_RESULT = object()


def return_future(f):
    """Decorator to make a function that returns via callback return a
    `Future`.

    The wrapped function should take a ``callback`` keyword argument
    and invoke it with one argument when it has finished.  To signal failure,
    the function can simply raise an exception (which will be
    captured by the `.StackContext` and passed along to the ``Future``).

    From the caller's perspective, the callback argument is optional.
    If one is given, it will be invoked when the function is complete
    with `Future.result()` as an argument.  If the function fails, the
    callback will not be run and an exception will be raised into the
    surrounding `.StackContext`.

    If no callback is given, the caller should use the ``Future`` to
    wait for the function to complete (perhaps by yielding it in a
    `.gen.engine` function, or passing it to `.IOLoop.add_future`).

    Usage:

    ..
testcode:: @return_future def future_func(arg1, arg2, callback): # Do stuff (possibly asynchronous) callback(result) @gen.engine def caller(callback): yield future_func(arg1, arg2) callback() .. Note that ``@return_future`` and ``@gen.engine`` can be applied to the same function, provided ``@return_future`` appears first. However, consider using ``@gen.coroutine`` instead of this combination. """ replacer = ArgReplacer(f, 'callback') @functools.wraps(f) def wrapper(*args, **kwargs): future = TracebackFuture() callback, args, kwargs = replacer.replace( lambda value=_NO_RESULT: future.set_result(value), args, kwargs) def handle_error(typ, value, tb): future.set_exc_info((typ, value, tb)) return True exc_info = None with ExceptionStackContext(handle_error): try: result = f(*args, **kwargs) if result is not None: raise ReturnValueIgnoredError( "@return_future should not be used with functions " "that return values") except: exc_info = sys.exc_info() raise if exc_info is not None: # If the initial synchronous part of f() raised an exception, # go ahead and raise it to the caller directly without waiting # for them to inspect the Future. future.result() # If the caller passed in a callback, schedule it to be called # when the future resolves. It is important that this happens # just before we return the future, or else we risk confusing # stack contexts with multiple exceptions (one here with the # immediate exception, and again when the future resolves and # the callback triggers its exception by calling future.result()). if callback is not None: def run_callback(future): result = future.result() if result is _NO_RESULT: callback() else: callback(future.result()) future.add_done_callback(wrap(run_callback)) return future return wrapper def chain_future(a, b): """Chain two futures together so that when one completes, so does the other. The result (success or failure) of ``a`` will be copied to ``b``, unless ``b`` has already been completed or cancelled by the time ``a`` finishes. """ def copy(future): assert future is a if b.done(): return if (isinstance(a, TracebackFuture) and isinstance(b, TracebackFuture) and a.exc_info() is not None): b.set_exc_info(a.exc_info()) elif a.exception() is not None: b.set_exception(a.exception()) else: b.set_result(a.result()) a.add_done_callback(copy) tornado-4.5.3/tornado/curl_httpclient.py000066400000000000000000000547101322420601000204110ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
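# A usage sketch for the run_on_executor decorator defined in
# concurrent.py above.  The Worker class and its attribute names are
# illustrative, not part of Tornado: the decorator simply looks up
# ``self.executor`` and ``self.io_loop`` by default, so the instance
# must provide both attributes.

from concurrent.futures import ThreadPoolExecutor

from tornado import gen
from tornado.concurrent import run_on_executor
from tornado.ioloop import IOLoop


class Worker(object):
    def __init__(self):
        self.executor = ThreadPoolExecutor(max_workers=4)
        self.io_loop = IOLoop.current()

    @run_on_executor
    def compute(self, x):
        # Runs on the thread pool; the caller immediately gets a Future.
        return x * x


@gen.coroutine
def example():
    worker = Worker()
    result = yield worker.compute(7)  # resolves to 49
    raise gen.Return(result)


if __name__ == "__main__":
    print(IOLoop.current().run_sync(example))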
"""Non-blocking HTTP client implementation using pycurl.""" from __future__ import absolute_import, division, print_function import collections import functools import logging import pycurl # type: ignore import threading import time from io import BytesIO from tornado import httputil from tornado import ioloop from tornado import stack_context from tornado.escape import utf8, native_str from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main curl_log = logging.getLogger('tornado.curl_httpclient') class CurlAsyncHTTPClient(AsyncHTTPClient): def initialize(self, io_loop, max_clients=10, defaults=None): super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults) self._multi = pycurl.CurlMulti() self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) self._curls = [self._curl_create() for i in range(max_clients)] self._free_list = self._curls[:] self._requests = collections.deque() self._fds = {} self._timeout = None # libcurl has bugs that sometimes cause it to not report all # relevant file descriptors and timeouts to TIMERFUNCTION/ # SOCKETFUNCTION. Mitigate the effects of such bugs by # forcing a periodic scan of all active requests. self._force_timeout_callback = ioloop.PeriodicCallback( self._handle_force_timeout, 1000, io_loop=io_loop) self._force_timeout_callback.start() # Work around a bug in libcurl 7.29.0: Some fields in the curl # multi object are initialized lazily, and its destructor will # segfault if it is destroyed without having been used. Add # and remove a dummy handle to make sure everything is # initialized. dummy_curl_handle = pycurl.Curl() self._multi.add_handle(dummy_curl_handle) self._multi.remove_handle(dummy_curl_handle) def close(self): self._force_timeout_callback.stop() if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) for curl in self._curls: curl.close() self._multi.close() super(CurlAsyncHTTPClient, self).close() def fetch_impl(self, request, callback): self._requests.append((request, callback)) self._process_queue() self._set_timeout(0) def _handle_socket(self, event, fd, multi, data): """Called by libcurl when it wants to change the file descriptors it cares about. """ event_map = { pycurl.POLL_NONE: ioloop.IOLoop.NONE, pycurl.POLL_IN: ioloop.IOLoop.READ, pycurl.POLL_OUT: ioloop.IOLoop.WRITE, pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE } if event == pycurl.POLL_REMOVE: if fd in self._fds: self.io_loop.remove_handler(fd) del self._fds[fd] else: ioloop_event = event_map[event] # libcurl sometimes closes a socket and then opens a new # one using the same FD without giving us a POLL_NONE in # between. This is a problem with the epoll IOLoop, # because the kernel can tell when a socket is closed and # removes it from the epoll automatically, causing future # update_handler calls to fail. Since we can't tell when # this has happened, always use remove and re-add # instead of update. if fd in self._fds: self.io_loop.remove_handler(fd) self.io_loop.add_handler(fd, self._handle_events, ioloop_event) self._fds[fd] = ioloop_event def _set_timeout(self, msecs): """Called by libcurl to schedule a timeout.""" if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = self.io_loop.add_timeout( self.io_loop.time() + msecs / 1000.0, self._handle_timeout) def _handle_events(self, fd, events): """Called by IOLoop when there is activity on one of our file descriptors. 
""" action = 0 if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT while True: try: ret, num_handles = self._multi.socket_action(fd, action) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() def _handle_timeout(self): """Called by IOLoop when the requested timeout has passed.""" with stack_context.NullContext(): self._timeout = None while True: try: ret, num_handles = self._multi.socket_action( pycurl.SOCKET_TIMEOUT, 0) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() # In theory, we shouldn't have to do this because curl will # call _set_timeout whenever the timeout changes. However, # sometimes after _handle_timeout we will need to reschedule # immediately even though nothing has changed from curl's # perspective. This is because when socket_action is # called with SOCKET_TIMEOUT, libcurl decides internally which # timeouts need to be processed by using a monotonic clock # (where available) while tornado uses python's time.time() # to decide when timeouts have occurred. When those clocks # disagree on elapsed time (as they will whenever there is an # NTP adjustment), tornado might call _handle_timeout before # libcurl is ready. After each timeout, resync the scheduled # timeout with libcurl's current state. new_timeout = self._multi.timeout() if new_timeout >= 0: self._set_timeout(new_timeout) def _handle_force_timeout(self): """Called by IOLoop periodically to ask libcurl to process any events it may have forgotten about. """ with stack_context.NullContext(): while True: try: ret, num_handles = self._multi.socket_all() except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() def _finish_pending_requests(self): """Process any requests that were completed by the last call to multi.socket_action. """ while True: num_q, ok_list, err_list = self._multi.info_read() for curl in ok_list: self._finish(curl) for curl, errnum, errmsg in err_list: self._finish(curl, errnum, errmsg) if num_q == 0: break self._process_queue() def _process_queue(self): with stack_context.NullContext(): while True: started = 0 while self._free_list and self._requests: started += 1 curl = self._free_list.pop() (request, callback) = self._requests.popleft() curl.info = { "headers": httputil.HTTPHeaders(), "buffer": BytesIO(), "request": request, "callback": callback, "curl_start_time": time.time(), } try: self._curl_setup_request( curl, request, curl.info["buffer"], curl.info["headers"]) except Exception as e: # If there was an error in setup, pass it on # to the callback. Note that allowing the # error to escape here will appear to work # most of the time since we are still in the # caller's original stack frame, but when # _process_queue() is called from # _finish_pending_requests the exceptions have # nowhere to go. 
self._free_list.append(curl) callback(HTTPResponse( request=request, code=599, error=e)) else: self._multi.add_handle(curl) if not started: break def _finish(self, curl, curl_error=None, curl_message=None): info = curl.info curl.info = None self._multi.remove_handle(curl) self._free_list.append(curl) buffer = info["buffer"] if curl_error: error = CurlError(curl_error, curl_message) code = error.code effective_url = None buffer.close() buffer = None else: error = None code = curl.getinfo(pycurl.HTTP_CODE) effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) buffer.seek(0) # the various curl timings are documented at # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html time_info = dict( queue=info["curl_start_time"] - info["request"].start_time, namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), connect=curl.getinfo(pycurl.CONNECT_TIME), pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), total=curl.getinfo(pycurl.TOTAL_TIME), redirect=curl.getinfo(pycurl.REDIRECT_TIME), ) try: info["callback"](HTTPResponse( request=info["request"], code=code, headers=info["headers"], buffer=buffer, effective_url=effective_url, error=error, reason=info['headers'].get("X-Http-Reason", None), request_time=time.time() - info["curl_start_time"], time_info=time_info)) except Exception: self.handle_callback_exception(info["callback"]) def handle_callback_exception(self, callback): self.io_loop.handle_callback_exception(callback) def _curl_create(self): curl = pycurl.Curl() if curl_log.isEnabledFor(logging.DEBUG): curl.setopt(pycurl.VERBOSE, 1) curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug) if hasattr(pycurl, 'PROTOCOLS'): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12) curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) return curl def _curl_setup_request(self, curl, request, buffer, headers): curl.setopt(pycurl.URL, native_str(request.url)) # libcurl's magic "Expect: 100-continue" behavior causes delays # with servers that don't support it (which include, among others, # Google's OpenID endpoint). Additionally, this behavior has # a bug in conjunction with the curl_multi_socket_action API # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976), # which increases the delays. It's more trouble than it's worth, # so just turn off the feature (yes, setting Expect: to an empty # value is the official way to disable this) if "Expect" not in request.headers: request.headers["Expect"] = "" # libcurl adds Pragma: no-cache by default; disable that too if "Pragma" not in request.headers: request.headers["Pragma"] = "" curl.setopt(pycurl.HTTPHEADER, ["%s: %s" % (native_str(k), native_str(v)) for k, v in request.headers.get_all()]) curl.setopt(pycurl.HEADERFUNCTION, functools.partial(self._curl_header_callback, headers, request.header_callback)) if request.streaming_callback: def write_function(chunk): self.io_loop.add_callback(request.streaming_callback, chunk) else: write_function = buffer.write if bytes is str: # py2 curl.setopt(pycurl.WRITEFUNCTION, write_function) else: # py3 # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes # a fork/port. That version has a bug in which it passes unicode # strings instead of bytes to the WRITEFUNCTION. This means that # if you use a WRITEFUNCTION (which tornado always does), you cannot # download arbitrary binary data. 
This needs to be fixed in the # ported pycurl package, but in the meantime this lambda will # make it work for downloading (utf8) text. curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s))) curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) curl.setopt(pycurl.MAXREDIRS, request.max_redirects) curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout)) if request.user_agent: curl.setopt(pycurl.USERAGENT, native_str(request.user_agent)) else: curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") if request.network_interface: curl.setopt(pycurl.INTERFACE, request.network_interface) if request.decompress_response: curl.setopt(pycurl.ENCODING, "gzip,deflate") else: curl.setopt(pycurl.ENCODING, "none") if request.proxy_host and request.proxy_port: curl.setopt(pycurl.PROXY, request.proxy_host) curl.setopt(pycurl.PROXYPORT, request.proxy_port) if request.proxy_username: credentials = '%s:%s' % (request.proxy_username, request.proxy_password) curl.setopt(pycurl.PROXYUSERPWD, credentials) if (request.proxy_auth_mode is None or request.proxy_auth_mode == "basic"): curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC) elif request.proxy_auth_mode == "digest": curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST) else: raise ValueError( "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode) else: curl.setopt(pycurl.PROXY, '') curl.unsetopt(pycurl.PROXYUSERPWD) if request.validate_cert: curl.setopt(pycurl.SSL_VERIFYPEER, 1) curl.setopt(pycurl.SSL_VERIFYHOST, 2) else: curl.setopt(pycurl.SSL_VERIFYPEER, 0) curl.setopt(pycurl.SSL_VERIFYHOST, 0) if request.ca_certs is not None: curl.setopt(pycurl.CAINFO, request.ca_certs) else: # There is no way to restore pycurl.CAINFO to its default value # (Using unsetopt makes it reject all certificates). # I don't see any way to read the default value from python so it # can be restored later. We'll have to just leave CAINFO untouched # if no ca_certs file was specified, and require that if any # request uses a custom ca_certs file, they all must. pass if request.allow_ipv6 is False: # Curl behaves reasonably when DNS resolution gives an ipv6 address # that we can't reach, so allow ipv6 unless the user asks to disable. curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) else: curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) # Set the request method through curl's irritating interface which makes # up names for almost every single method curl_options = { "GET": pycurl.HTTPGET, "POST": pycurl.POST, "PUT": pycurl.UPLOAD, "HEAD": pycurl.NOBODY, } custom_methods = set(["DELETE", "OPTIONS", "PATCH"]) for o in curl_options.values(): curl.setopt(o, False) if request.method in curl_options: curl.unsetopt(pycurl.CUSTOMREQUEST) curl.setopt(curl_options[request.method], True) elif request.allow_nonstandard_methods or request.method in custom_methods: curl.setopt(pycurl.CUSTOMREQUEST, request.method) else: raise KeyError('unknown method ' + request.method) body_expected = request.method in ("POST", "PATCH", "PUT") body_present = request.body is not None if not request.allow_nonstandard_methods: # Some HTTP methods nearly always have bodies while others # almost never do. Fail in this case unless the user has # opted out of sanity checks with allow_nonstandard_methods. 
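            # For example, a POST with body=None or a GET with a body will
            # fail here unless allow_nonstandard_methods is set.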
if ((body_expected and not body_present) or (body_present and not body_expected)): raise ValueError( 'Body must %sbe None for method %s (unless ' 'allow_nonstandard_methods is true)' % ('not ' if body_expected else '', request.method)) if body_expected or body_present: if request.method == "GET": # Even with `allow_nonstandard_methods` we disallow # GET with a body (because libcurl doesn't allow it # unless we use CUSTOMREQUEST). While the spec doesn't # forbid clients from sending a body, it arguably # disallows the server from doing anything with them. raise ValueError('Body must be None for GET request') request_buffer = BytesIO(utf8(request.body or '')) def ioctl(cmd): if cmd == curl.IOCMD_RESTARTREAD: request_buffer.seek(0) curl.setopt(pycurl.READFUNCTION, request_buffer.read) curl.setopt(pycurl.IOCTLFUNCTION, ioctl) if request.method == "POST": curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or '')) else: curl.setopt(pycurl.UPLOAD, True) curl.setopt(pycurl.INFILESIZE, len(request.body or '')) if request.auth_username is not None: userpwd = "%s:%s" % (request.auth_username, request.auth_password or '') if request.auth_mode is None or request.auth_mode == "basic": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) elif request.auth_mode == "digest": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST) else: raise ValueError("Unsupported auth_mode %s" % request.auth_mode) curl.setopt(pycurl.USERPWD, native_str(userpwd)) curl_log.debug("%s %s (username: %r)", request.method, request.url, request.auth_username) else: curl.unsetopt(pycurl.USERPWD) curl_log.debug("%s %s", request.method, request.url) if request.client_cert is not None: curl.setopt(pycurl.SSLCERT, request.client_cert) if request.client_key is not None: curl.setopt(pycurl.SSLKEY, request.client_key) if request.ssl_options is not None: raise ValueError("ssl_options not supported in curl_httpclient") if threading.activeCount() > 1: # libcurl/pycurl is not thread-safe by default. When multiple threads # are used, signals should be disabled. This has the side effect # of disabling DNS timeouts in some environments (when libcurl is # not linked against ares), so we don't do it when there is only one # thread. Applications that use many short-lived threads may need # to set NOSIGNAL manually in a prepare_curl_callback since # there may not be any other threads running at the time we call # threading.activeCount. curl.setopt(pycurl.NOSIGNAL, 1) if request.prepare_curl_callback is not None: request.prepare_curl_callback(curl) def _curl_header_callback(self, headers, header_callback, header_line): header_line = native_str(header_line.decode('latin1')) if header_callback is not None: self.io_loop.add_callback(header_callback, header_line) # header_line as returned by curl includes the end-of-line characters. 
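        # (e.g. b"Content-Type: text/html\r\n"; only the trailing CRLF
        # should be stripped below)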
        # whitespace at the start should be preserved to allow multi-line headers
        header_line = header_line.rstrip()
        if header_line.startswith("HTTP/"):
            headers.clear()
            try:
                (__, __, reason) = httputil.parse_response_start_line(header_line)
                header_line = "X-Http-Reason: %s" % reason
            except httputil.HTTPInputError:
                return
        if not header_line:
            return
        headers.parse_line(header_line)

    def _curl_debug(self, debug_type, debug_msg):
        debug_types = ('I', '<', '>', '<', '>')
        debug_msg = native_str(debug_msg)
        if debug_type == 0:
            curl_log.debug('%s', debug_msg.strip())
        elif debug_type in (1, 2):
            for line in debug_msg.splitlines():
                curl_log.debug('%s %s', debug_types[debug_type], line)
        elif debug_type == 4:
            curl_log.debug('%s %r', debug_types[debug_type], debug_msg)


class CurlError(HTTPError):
    def __init__(self, errno, message):
        HTTPError.__init__(self, 599, message)
        self.errno = errno


if __name__ == "__main__":
    AsyncHTTPClient.configure(CurlAsyncHTTPClient)
    main()
tornado-4.5.3/tornado/escape.py000066400000000000000000000340711322420601000164440ustar00rootroot00000000000000
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Escaping/unescaping methods for HTML, JSON, URLs, and others.

Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""

from __future__ import absolute_import, division, print_function

import json
import re

from tornado.util import PY3, unicode_type, basestring_type

if PY3:
    from urllib.parse import parse_qs as _parse_qs
    import html.entities as htmlentitydefs
    import urllib.parse as urllib_parse
    unichr = chr
else:
    from urlparse import parse_qs as _parse_qs
    import htmlentitydefs
    import urllib as urllib_parse

try:
    import typing  # noqa
except ImportError:
    pass


_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
                      '\'': '&#39;'}


def xhtml_escape(value):
    """Escapes a string so it is valid within HTML or XML.

    Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
    When used in attribute values the escaped strings must be enclosed
    in quotes.

    .. versionchanged:: 3.2

       Added the single quote to the list of escaped characters.
    """
    return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
                                to_basestring(value))


def xhtml_unescape(value):
    """Un-escapes an XML-escaped string."""
    return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))


# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
    """JSON-encodes the given Python object."""
    # JSON permits but does not require forward slashes to be escaped.
    # This is useful when json data is emitted in a <script> tag
    # in HTML, as it prevents </script> tags from prematurely terminating
    # the javascript. Some json libraries do this escaping by default,
    # although python's standard library does not, so we do it here.
    # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
    return json.dumps(value).replace("</", "<\\/")


_UTF8_TYPES = (bytes, type(None))


def utf8(value):
    # type: (typing.Union[bytes, unicode_type, None]) -> typing.Union[bytes, None]
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned unchanged.
    Otherwise it must be a unicode string and is encoded as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if not isinstance(value, unicode_type):
        raise TypeError(
            "Expected bytes, unicode, or None; got %r" % type(value)
        )
    return value.encode("utf-8")


_TO_UNICODE_TYPES = (unicode_type, type(None))


def to_unicode(value):
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged.  Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if not isinstance(value, bytes):
        raise TypeError(
            "Expected bytes, unicode, or None; got %r" % type(value)
        )
    return value.decode("utf-8")


# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode

# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
    native_str = to_unicode
else:
    native_str = utf8

_BASESTRING_TYPES = (basestring_type, type(None))


def to_basestring(value):
    """Converts a string argument to a subclass of basestring.

    In python2, byte and unicode strings are mostly interchangeable,
    so functions that deal with a user-supplied argument in combination
    with ascii string constants can use either and should return the type
    the user supplied.  In python3, the two types are not interchangeable,
    so this method is needed to convert byte strings to unicode.
    """
    if isinstance(value, _BASESTRING_TYPES):
        return value
    if not isinstance(value, bytes):
        raise TypeError(
            "Expected bytes, unicode, or None; got %r" % type(value)
        )
    return value.decode("utf-8")


def recursive_unicode(obj):
    """Walks a simple data structure, converting byte strings to unicode.

    Supports lists, tuples, and dictionaries.
    """
    if isinstance(obj, dict):
        return dict((recursive_unicode(k), recursive_unicode(v))
                    for (k, v) in obj.items())
    elif isinstance(obj, list):
        return list(recursive_unicode(i) for i in obj)
    elif isinstance(obj, tuple):
        return tuple(recursive_unicode(i) for i in obj)
    elif isinstance(obj, bytes):
        return to_unicode(obj)
    else:
        return obj


# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""))


def linkify(text, shorten=False, extra_params="",
            require_protocol=False, permitted_protocols=["http", "https"]):
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.

    * ``extra_params``: Extra text to include in the link tag, or a callable
      taking the link as an argument and returning the extra text
      e.g.
In this # situation, if a garbage collection ran, a cycle would be detected and # Runner objects could be destroyed along with their inner generators # and everything in their local scope. # This map provides strong references to Runner objects as long as # their result future objects also have strong references (typically # from the parent coroutine's Runner). This keeps the coroutine's # Runner alive. _futures_to_runners = weakref.WeakKeyDictionary() def _make_coroutine_wrapper(func, replace_callback): """The inner workings of ``@gen.coroutine`` and ``@gen.engine``. The two decorators differ in their treatment of the ``callback`` argument, so we cannot simply implement ``@engine`` in terms of ``@coroutine``. """ # On Python 3.5, set the coroutine flag on our generator, to allow it # to be used with 'await'. wrapped = func if hasattr(types, 'coroutine'): func = types.coroutine(func) @functools.wraps(wrapped) def wrapper(*args, **kwargs): future = TracebackFuture() if replace_callback and 'callback' in kwargs: callback = kwargs.pop('callback') IOLoop.current().add_future( future, lambda future: callback(future.result())) try: result = func(*args, **kwargs) except (Return, StopIteration) as e: result = _value_from_stopiteration(e) except Exception: future.set_exc_info(sys.exc_info()) return future else: if isinstance(result, GeneratorType): # Inline the first iteration of Runner.run. This lets us # avoid the cost of creating a Runner when the coroutine # never actually yields, which in turn allows us to # use "optional" coroutines in critical path code without # performance penalty for the synchronous case. try: orig_stack_contexts = stack_context._state.contexts yielded = next(result) if stack_context._state.contexts is not orig_stack_contexts: yielded = TracebackFuture() yielded.set_exception( stack_context.StackContextInconsistentError( 'stack_context inconsistency (probably caused ' 'by yield within a "with StackContext" block)')) except (StopIteration, Return) as e: future.set_result(_value_from_stopiteration(e)) except Exception: future.set_exc_info(sys.exc_info()) else: _futures_to_runners[future] = Runner(result, future, yielded) yielded = None try: return future finally: # Subtle memory optimization: if next() raised an exception, # the future's exc_info contains a traceback which # includes this stack frame. This creates a cycle, # which will be collected at the next full GC but has # been shown to greatly increase memory usage of # benchmarks (relative to the refcount-based scheme # used in the absence of cycles). We can avoid the # cycle by clearing the local variable after we return it. future = None future.set_result(result) return future wrapper.__wrapped__ = wrapped wrapper.__tornado_coroutine__ = True return wrapper def is_coroutine_function(func): """Return whether *func* is a coroutine function, i.e. a function wrapped with `~.gen.coroutine`. .. versionadded:: 4.5 """ return getattr(func, '__tornado_coroutine__', False) class Return(Exception): """Special exception to return a value from a `coroutine`. If this exception is raised, its value argument is used as the result of the coroutine:: @gen.coroutine def fetch_json(url): response = yield AsyncHTTPClient().fetch(url) raise gen.Return(json_decode(response.body)) In Python 3.3, this exception is no longer necessary: the ``return`` statement can be used directly to return a value (previously ``yield`` and ``return`` with a value could not be combined in the same function). 
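    On Python 3.3+, for example, the same coroutine can be written with a
    plain ``return`` (a sketch of the equivalent form; ``AsyncHTTPClient``
    and ``json_decode`` as in the example above)::

        @gen.coroutine
        def fetch_json(url):
            response = yield AsyncHTTPClient().fetch(url)
            return json_decode(response.body)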
By analogy with the return statement, the value argument is optional, but it is never necessary to ``raise gen.Return()``. The ``return`` statement can be used with no arguments instead. """ def __init__(self, value=None): super(Return, self).__init__() self.value = value # Cython recognizes subclasses of StopIteration with a .args tuple. self.args = (value,) class WaitIterator(object): """Provides an iterator to yield the results of futures as they finish. Yielding a set of futures like this: ``results = yield [future1, future2]`` pauses the coroutine until both ``future1`` and ``future2`` return, and then restarts the coroutine with the results of both futures. If either future is an exception, the expression will raise that exception and all the results will be lost. If you need to get the result of each future as soon as possible, or if you need the result of some futures even if others produce errors, you can use ``WaitIterator``:: wait_iterator = gen.WaitIterator(future1, future2) while not wait_iterator.done(): try: result = yield wait_iterator.next() except Exception as e: print("Error {} from {}".format(e, wait_iterator.current_future)) else: print("Result {} received from {} at {}".format( result, wait_iterator.current_future, wait_iterator.current_index)) Because results are returned as soon as they are available the output from the iterator *will not be in the same order as the input arguments*. If you need to know which future produced the current result, you can use the attributes ``WaitIterator.current_future``, or ``WaitIterator.current_index`` to get the index of the future from the input list. (if keyword arguments were used in the construction of the `WaitIterator`, ``current_index`` will use the corresponding keyword). On Python 3.5, `WaitIterator` implements the async iterator protocol, so it can be used with the ``async for`` statement (note that in this version the entire iteration is aborted if any value raises an exception, while the previous example can continue past individual errors):: async for result in gen.WaitIterator(future1, future2): print("Result {} received from {} at {}".format( result, wait_iterator.current_future, wait_iterator.current_index)) .. versionadded:: 4.1 .. versionchanged:: 4.3 Added ``async for`` support in Python 3.5. """ def __init__(self, *args, **kwargs): if args and kwargs: raise ValueError( "You must provide args or kwargs, not both") if kwargs: self._unfinished = dict((f, k) for (k, f) in kwargs.items()) futures = list(kwargs.values()) else: self._unfinished = dict((f, i) for (i, f) in enumerate(args)) futures = args self._finished = collections.deque() self.current_index = self.current_future = None self._running_future = None for future in futures: future.add_done_callback(self._done_callback) def done(self): """Returns True if this iterator has no more results.""" if self._finished or self._unfinished: return False # Clear the 'current' values when iteration is done. self.current_index = self.current_future = None return True def next(self): """Returns a `.Future` that will yield the next available result. Note that this `.Future` will not be the same object as any of the inputs. 
""" self._running_future = TracebackFuture() if self._finished: self._return_result(self._finished.popleft()) return self._running_future def _done_callback(self, done): if self._running_future and not self._running_future.done(): self._return_result(done) else: self._finished.append(done) def _return_result(self, done): """Called set the returned future's state that of the future we yielded, and set the current future for the iterator. """ chain_future(done, self._running_future) self.current_future = done self.current_index = self._unfinished.pop(done) @coroutine def __aiter__(self): raise Return(self) def __anext__(self): if self.done(): # Lookup by name to silence pyflakes on older versions. raise getattr(builtins, 'StopAsyncIteration')() return self.next() class YieldPoint(object): """Base class for objects that may be yielded from the generator. .. deprecated:: 4.0 Use `Futures <.Future>` instead. """ def start(self, runner): """Called by the runner after the generator has yielded. No other methods will be called on this object before ``start``. """ raise NotImplementedError() def is_ready(self): """Called by the runner to determine whether to resume the generator. Returns a boolean; may be called more than once. """ raise NotImplementedError() def get_result(self): """Returns the value to use as the result of the yield expression. This method will only be called once, and only after `is_ready` has returned true. """ raise NotImplementedError() class Callback(YieldPoint): """Returns a callable object that will allow a matching `Wait` to proceed. The key may be any value suitable for use as a dictionary key, and is used to match ``Callbacks`` to their corresponding ``Waits``. The key must be unique among outstanding callbacks within a single run of the generator function, but may be reused across different runs of the same function (so constants generally work fine). The callback may be called with zero or one arguments; if an argument is given it will be returned by `Wait`. .. deprecated:: 4.0 Use `Futures <.Future>` instead. """ def __init__(self, key): self.key = key def start(self, runner): self.runner = runner runner.register_callback(self.key) def is_ready(self): return True def get_result(self): return self.runner.result_callback(self.key) class Wait(YieldPoint): """Returns the argument passed to the result of a previous `Callback`. .. deprecated:: 4.0 Use `Futures <.Future>` instead. """ def __init__(self, key): self.key = key def start(self, runner): self.runner = runner def is_ready(self): return self.runner.is_ready(self.key) def get_result(self): return self.runner.pop_result(self.key) class WaitAll(YieldPoint): """Returns the results of multiple previous `Callbacks `. The argument is a sequence of `Callback` keys, and the result is a list of results in the same order. `WaitAll` is equivalent to yielding a list of `Wait` objects. .. deprecated:: 4.0 Use `Futures <.Future>` instead. """ def __init__(self, keys): self.keys = keys def start(self, runner): self.runner = runner def is_ready(self): return all(self.runner.is_ready(key) for key in self.keys) def get_result(self): return [self.runner.pop_result(key) for key in self.keys] def Task(func, *args, **kwargs): """Adapts a callback-based asynchronous function for use in coroutines. Takes a function (and optional additional arguments) and runs it with those arguments plus a ``callback`` keyword argument. The argument passed to the callback is returned as the result of the yield expression. .. 
versionchanged:: 4.0 ``gen.Task`` is now a function that returns a `.Future`, instead of a subclass of `YieldPoint`. It still behaves the same way when yielded. """ future = Future() def handle_exception(typ, value, tb): if future.done(): return False future.set_exc_info((typ, value, tb)) return True def set_result(result): if future.done(): return future.set_result(result) with stack_context.ExceptionStackContext(handle_exception): func(*args, callback=_argument_adapter(set_result), **kwargs) return future class YieldFuture(YieldPoint): def __init__(self, future, io_loop=None): """Adapts a `.Future` to the `YieldPoint` interface. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ self.future = future self.io_loop = io_loop or IOLoop.current() def start(self, runner): if not self.future.done(): self.runner = runner self.key = object() runner.register_callback(self.key) self.io_loop.add_future(self.future, runner.result_callback(self.key)) else: self.runner = None self.result_fn = self.future.result def is_ready(self): if self.runner is not None: return self.runner.is_ready(self.key) else: return True def get_result(self): if self.runner is not None: return self.runner.pop_result(self.key).result() else: return self.result_fn() def _contains_yieldpoint(children): """Returns True if ``children`` contains any YieldPoints. ``children`` may be a dict or a list, as used by `MultiYieldPoint` and `multi_future`. """ if isinstance(children, dict): return any(isinstance(i, YieldPoint) for i in children.values()) if isinstance(children, list): return any(isinstance(i, YieldPoint) for i in children) return False def multi(children, quiet_exceptions=()): """Runs multiple asynchronous operations in parallel. ``children`` may either be a list or a dict whose values are yieldable objects. ``multi()`` returns a new yieldable object that resolves to a parallel structure containing their results. If ``children`` is a list, the result is a list of results in the same order; if it is a dict, the result is a dict with the same keys. That is, ``results = yield multi(list_of_futures)`` is equivalent to:: results = [] for future in list_of_futures: results.append(yield future) If any children raise exceptions, ``multi()`` will raise the first one. All others will be logged, unless they are of types contained in the ``quiet_exceptions`` argument. If any of the inputs are `YieldPoints `, the returned yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`. This means that the result of `multi` can be used in a native coroutine if and only if all of its children can be. In a ``yield``-based coroutine, it is not normally necessary to call this function directly, since the coroutine runner will do it automatically when a list or dict is yielded. However, it is necessary in ``await``-based coroutines, or to pass the ``quiet_exceptions`` argument. This function is available under the names ``multi()`` and ``Multi()`` for historical reasons. .. versionchanged:: 4.2 If multiple yieldables fail, any exceptions after the first (which is raised) will be logged. Added the ``quiet_exceptions`` argument to suppress this logging for selected exception types. .. versionchanged:: 4.3 Replaced the class ``Multi`` and the function ``multi_future`` with a unified function ``multi``. Added support for yieldables other than `YieldPoint` and `.Future`. 
""" if _contains_yieldpoint(children): return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions) else: return multi_future(children, quiet_exceptions=quiet_exceptions) Multi = multi class MultiYieldPoint(YieldPoint): """Runs multiple asynchronous operations in parallel. This class is similar to `multi`, but it always creates a stack context even when no children require it. It is not compatible with native coroutines. .. versionchanged:: 4.2 If multiple ``YieldPoints`` fail, any exceptions after the first (which is raised) will be logged. Added the ``quiet_exceptions`` argument to suppress this logging for selected exception types. .. versionchanged:: 4.3 Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi`` remains as an alias for the equivalent `multi` function. .. deprecated:: 4.3 Use `multi` instead. """ def __init__(self, children, quiet_exceptions=()): self.keys = None if isinstance(children, dict): self.keys = list(children.keys()) children = children.values() self.children = [] for i in children: if not isinstance(i, YieldPoint): i = convert_yielded(i) if is_future(i): i = YieldFuture(i) self.children.append(i) assert all(isinstance(i, YieldPoint) for i in self.children) self.unfinished_children = set(self.children) self.quiet_exceptions = quiet_exceptions def start(self, runner): for i in self.children: i.start(runner) def is_ready(self): finished = list(itertools.takewhile( lambda i: i.is_ready(), self.unfinished_children)) self.unfinished_children.difference_update(finished) return not self.unfinished_children def get_result(self): result_list = [] exc_info = None for f in self.children: try: result_list.append(f.get_result()) except Exception as e: if exc_info is None: exc_info = sys.exc_info() else: if not isinstance(e, self.quiet_exceptions): app_log.error("Multiple exceptions in yield list", exc_info=True) if exc_info is not None: raise_exc_info(exc_info) if self.keys is not None: return dict(zip(self.keys, result_list)) else: return list(result_list) def multi_future(children, quiet_exceptions=()): """Wait for multiple asynchronous futures in parallel. This function is similar to `multi`, but does not support `YieldPoints `. .. versionadded:: 4.0 .. versionchanged:: 4.2 If multiple ``Futures`` fail, any exceptions after the first (which is raised) will be logged. Added the ``quiet_exceptions`` argument to suppress this logging for selected exception types. .. deprecated:: 4.3 Use `multi` instead. """ if isinstance(children, dict): keys = list(children.keys()) children = children.values() else: keys = None children = list(map(convert_yielded, children)) assert all(is_future(i) for i in children) unfinished_children = set(children) future = Future() if not children: future.set_result({} if keys is not None else []) def callback(f): unfinished_children.remove(f) if not unfinished_children: result_list = [] for f in children: try: result_list.append(f.result()) except Exception as e: if future.done(): if not isinstance(e, quiet_exceptions): app_log.error("Multiple exceptions in yield list", exc_info=True) else: future.set_exc_info(sys.exc_info()) if not future.done(): if keys is not None: future.set_result(dict(zip(keys, result_list))) else: future.set_result(result_list) listening = set() for f in children: if f not in listening: listening.add(f) f.add_done_callback(callback) return future def maybe_future(x): """Converts ``x`` into a `.Future`. If ``x`` is already a `.Future`, it is simply returned; otherwise it is wrapped in a new `.Future`. 
This is suitable for use as ``result = yield gen.maybe_future(f())`` when you don't know whether ``f()`` returns a `.Future` or not. .. deprecated:: 4.3 This function only handles ``Futures``, not other yieldable objects. Instead of `maybe_future`, check for the non-future result types you expect (often just ``None``), and ``yield`` anything unknown. """ if is_future(x): return x else: fut = Future() fut.set_result(x) return fut def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()): """Wraps a `.Future` (or other yieldable object) in a timeout. Raises `TimeoutError` if the input future does not complete before ``timeout``, which may be specified in any form allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time relative to `.IOLoop.time`) If the wrapped `.Future` fails after it has timed out, the exception will be logged unless it is of a type contained in ``quiet_exceptions`` (which may be an exception type or a sequence of types). Does not support `YieldPoint` subclasses. .. versionadded:: 4.0 .. versionchanged:: 4.1 Added the ``quiet_exceptions`` argument and the logging of unhandled exceptions. .. versionchanged:: 4.4 Added support for yieldable objects other than `.Future`. """ # TODO: allow YieldPoints in addition to other yieldables? # Tricky to do with stack_context semantics. # # It's tempting to optimize this by cancelling the input future on timeout # instead of creating a new one, but A) we can't know if we are the only # one waiting on the input future, so cancelling it might disrupt other # callers and B) concurrent futures can only be cancelled while they are # in the queue, so cancellation cannot reliably bound our waiting time. future = convert_yielded(future) result = Future() chain_future(future, result) if io_loop is None: io_loop = IOLoop.current() def error_callback(future): try: future.result() except Exception as e: if not isinstance(e, quiet_exceptions): app_log.error("Exception in Future %r after timeout", future, exc_info=True) def timeout_callback(): result.set_exception(TimeoutError("Timeout")) # In case the wrapped future goes on to fail, log it. future.add_done_callback(error_callback) timeout_handle = io_loop.add_timeout( timeout, timeout_callback) if isinstance(future, Future): # We know this future will resolve on the IOLoop, so we don't # need the extra thread-safety of IOLoop.add_future (and we also # don't care about StackContext here. future.add_done_callback( lambda future: io_loop.remove_timeout(timeout_handle)) else: # concurrent.futures.Futures may resolve on any thread, so we # need to route them back to the IOLoop. io_loop.add_future( future, lambda future: io_loop.remove_timeout(timeout_handle)) return result def sleep(duration): """Return a `.Future` that resolves after the given number of seconds. When used with ``yield`` in a coroutine, this is a non-blocking analogue to `time.sleep` (which should not be used in coroutines because it is blocking):: yield gen.sleep(0.5) Note that calling this function on its own does nothing; you must wait on the `.Future` it returns (usually by yielding it). .. versionadded:: 4.1 """ f = Future() IOLoop.current().call_later(duration, lambda: f.set_result(None)) return f _null_future = Future() _null_future.set_result(None) moment = Future() moment.__doc__ = \ """A special object which may be yielded to allow the IOLoop to run for one iteration. 
This is not needed in normal use but it can be helpful in long-running coroutines that are likely to yield Futures that are ready instantly. Usage: ``yield gen.moment`` .. versionadded:: 4.0 .. deprecated:: 4.5 ``yield None`` is now equivalent to ``yield gen.moment``. """ moment.set_result(None) class Runner(object): """Internal implementation of `tornado.gen.engine`. Maintains information about pending callbacks and their results. The results of the generator are stored in ``result_future`` (a `.TracebackFuture`) """ def __init__(self, gen, result_future, first_yielded): self.gen = gen self.result_future = result_future self.future = _null_future self.yield_point = None self.pending_callbacks = None self.results = None self.running = False self.finished = False self.had_exception = False self.io_loop = IOLoop.current() # For efficiency, we do not create a stack context until we # reach a YieldPoint (stack contexts are required for the historical # semantics of YieldPoints, but not for Futures). When we have # done so, this field will be set and must be called at the end # of the coroutine. self.stack_context_deactivate = None if self.handle_yield(first_yielded): gen = result_future = first_yielded = None self.run() def register_callback(self, key): """Adds ``key`` to the list of callbacks.""" if self.pending_callbacks is None: # Lazily initialize the old-style YieldPoint data structures. self.pending_callbacks = set() self.results = {} if key in self.pending_callbacks: raise KeyReuseError("key %r is already pending" % (key,)) self.pending_callbacks.add(key) def is_ready(self, key): """Returns true if a result is available for ``key``.""" if self.pending_callbacks is None or key not in self.pending_callbacks: raise UnknownKeyError("key %r is not pending" % (key,)) return key in self.results def set_result(self, key, result): """Sets the result for ``key`` and attempts to resume the generator.""" self.results[key] = result if self.yield_point is not None and self.yield_point.is_ready(): try: self.future.set_result(self.yield_point.get_result()) except: self.future.set_exc_info(sys.exc_info()) self.yield_point = None self.run() def pop_result(self, key): """Returns the result for ``key`` and unregisters it.""" self.pending_callbacks.remove(key) return self.results.pop(key) def run(self): """Starts or resumes the generator, running until it reaches a yield point that is not ready. """ if self.running or self.finished: return try: self.running = True while True: future = self.future if not future.done(): return self.future = None try: orig_stack_contexts = stack_context._state.contexts exc_info = None try: value = future.result() except Exception: self.had_exception = True exc_info = sys.exc_info() future = None if exc_info is not None: try: yielded = self.gen.throw(*exc_info) finally: # Break up a reference to itself # for faster GC on CPython. exc_info = None else: yielded = self.gen.send(value) if stack_context._state.contexts is not orig_stack_contexts: self.gen.throw( stack_context.StackContextInconsistentError( 'stack_context inconsistency (probably caused ' 'by yield within a "with StackContext" block)')) except (StopIteration, Return) as e: self.finished = True self.future = _null_future if self.pending_callbacks and not self.had_exception: # If we ran cleanly without waiting on all callbacks # raise an error (really more of a warning). If we # had an exception then some callbacks may have been # orphaned, so skip the check in that case. 
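                        # (pending_callbacks is populated by
                        # register_callback() above and drained by
                        # pop_result(); a non-empty set here means the
                        # generator exited before consuming every result
                        # it registered.)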
raise LeakedCallbackError( "finished without waiting for callbacks %r" % self.pending_callbacks) self.result_future.set_result(_value_from_stopiteration(e)) self.result_future = None self._deactivate_stack_context() return except Exception: self.finished = True self.future = _null_future self.result_future.set_exc_info(sys.exc_info()) self.result_future = None self._deactivate_stack_context() return if not self.handle_yield(yielded): return yielded = None finally: self.running = False def handle_yield(self, yielded): # Lists containing YieldPoints require stack contexts; # other lists are handled in convert_yielded. if _contains_yieldpoint(yielded): yielded = multi(yielded) if isinstance(yielded, YieldPoint): # YieldPoints are too closely coupled to the Runner to go # through the generic convert_yielded mechanism. self.future = TracebackFuture() def start_yield_point(): try: yielded.start(self) if yielded.is_ready(): self.future.set_result( yielded.get_result()) else: self.yield_point = yielded except Exception: self.future = TracebackFuture() self.future.set_exc_info(sys.exc_info()) if self.stack_context_deactivate is None: # Start a stack context if this is the first # YieldPoint we've seen. with stack_context.ExceptionStackContext( self.handle_exception) as deactivate: self.stack_context_deactivate = deactivate def cb(): start_yield_point() self.run() self.io_loop.add_callback(cb) return False else: start_yield_point() else: try: self.future = convert_yielded(yielded) except BadYieldError: self.future = TracebackFuture() self.future.set_exc_info(sys.exc_info()) if not self.future.done() or self.future is moment: def inner(f): # Break a reference cycle to speed GC. f = None # noqa self.run() self.io_loop.add_future( self.future, inner) return False return True def result_callback(self, key): return stack_context.wrap(_argument_adapter( functools.partial(self.set_result, key))) def handle_exception(self, typ, value, tb): if not self.running and not self.finished: self.future = TracebackFuture() self.future.set_exc_info((typ, value, tb)) self.run() return True else: return False def _deactivate_stack_context(self): if self.stack_context_deactivate is not None: self.stack_context_deactivate() self.stack_context_deactivate = None Arguments = collections.namedtuple('Arguments', ['args', 'kwargs']) def _argument_adapter(callback): """Returns a function that when invoked runs ``callback`` with one arg. If the function returned by this function is called with exactly one argument, that argument is passed to ``callback``. Otherwise the args tuple and kwargs dict are wrapped in an `Arguments` object. """ def wrapper(*args, **kwargs): if kwargs or len(args) > 1: callback(Arguments(args, kwargs)) elif args: callback(args[0]) else: callback(None) return wrapper # Convert Awaitables into Futures. It is unfortunately possible # to have infinite recursion here if those Awaitables assume that # we're using a different coroutine runner and yield objects # we don't understand. If that happens, the solution is to # register that runner's yieldable objects with convert_yielded. if sys.version_info >= (3, 3): exec(textwrap.dedent(""" @coroutine def _wrap_awaitable(x): if hasattr(x, '__await__'): x = x.__await__() return (yield from x) """)) else: # Py2-compatible version for use with Cython. # Copied from PEP 380. 
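    # The expansion below emulates ``yield from x`` step by step: it
    # forwards send() and throw() into the inner iterator and collects
    # the final value via _value_from_stopiteration, raising it as
    # Return (Python 2 generators cannot ``return value``).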
@coroutine def _wrap_awaitable(x): if hasattr(x, '__await__'): _i = x.__await__() else: _i = iter(x) try: _y = next(_i) except StopIteration as _e: _r = _value_from_stopiteration(_e) else: while 1: try: _s = yield _y except GeneratorExit as _e: try: _m = _i.close except AttributeError: pass else: _m() raise _e except BaseException as _e: _x = sys.exc_info() try: _m = _i.throw except AttributeError: raise _e else: try: _y = _m(*_x) except StopIteration as _e: _r = _value_from_stopiteration(_e) break else: try: if _s is None: _y = next(_i) else: _y = _i.send(_s) except StopIteration as _e: _r = _value_from_stopiteration(_e) break raise Return(_r) def convert_yielded(yielded): """Convert a yielded object into a `.Future`. The default implementation accepts lists, dictionaries, and Futures. If the `~functools.singledispatch` library is available, this function may be extended to support additional types. For example:: @convert_yielded.register(asyncio.Future) def _(asyncio_future): return tornado.platform.asyncio.to_tornado_future(asyncio_future) .. versionadded:: 4.1 """ # Lists and dicts containing YieldPoints were handled earlier. if yielded is None: return moment elif isinstance(yielded, (list, dict)): return multi(yielded) elif is_future(yielded): return yielded elif isawaitable(yielded): return _wrap_awaitable(yielded) else: raise BadYieldError("yielded unknown object %r" % (yielded,)) if singledispatch is not None: convert_yielded = singledispatch(convert_yielded) try: # If we can import t.p.asyncio, do it for its side effect # (registering asyncio.Future with convert_yielded). # It's ugly to do this here, but it prevents a cryptic # infinite recursion in _wrap_awaitable. # Note that even with this, asyncio integration is unlikely # to work unless the application also configures AsyncIOLoop, # but at least the error messages in that case are more # comprehensible than a stack overflow. import tornado.platform.asyncio except ImportError: pass else: # Reference the imported module to make pyflakes happy. tornado tornado-4.5.3/tornado/http1connection.py000066400000000000000000000762401322420601000203300ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2014 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Client and server implementations of HTTP/1.x. .. versionadded:: 4.0 """ from __future__ import absolute_import, division, print_function import re from tornado.concurrent import Future from tornado.escape import native_str, utf8 from tornado import gen from tornado import httputil from tornado import iostream from tornado.log import gen_log, app_log from tornado import stack_context from tornado.util import GzipDecompressor, PY3 class _QuietException(Exception): def __init__(self): pass class _ExceptionLoggingContext(object): """Used with the ``with`` statement when calling delegate methods to log any exceptions with the given logger. 
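
    For example, the pattern used throughout this module is::

        with _ExceptionLoggingContext(app_log):
            delegate.finish()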
    Any exceptions caught are converted to _QuietException
    """
    def __init__(self, logger):
        self.logger = logger

    def __enter__(self):
        pass

    def __exit__(self, typ, value, tb):
        if value is not None:
            self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
            raise _QuietException


class HTTP1ConnectionParameters(object):
    """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
    """
    def __init__(self, no_keep_alive=False, chunk_size=None,
                 max_header_size=None, header_timeout=None, max_body_size=None,
                 body_timeout=None, decompress=False):
        """
        :arg bool no_keep_alive: If true, always close the connection after
            one request.
        :arg int chunk_size: how much data to read into memory at once
        :arg int max_header_size: maximum amount of data for HTTP headers
        :arg float header_timeout: how long to wait for all headers (seconds)
        :arg int max_body_size: maximum amount of data for body
        :arg float body_timeout: how long to wait while reading body (seconds)
        :arg bool decompress: if true, decode incoming
            ``Content-Encoding: gzip``
        """
        self.no_keep_alive = no_keep_alive
        self.chunk_size = chunk_size or 65536
        self.max_header_size = max_header_size or 65536
        self.header_timeout = header_timeout
        self.max_body_size = max_body_size
        self.body_timeout = body_timeout
        self.decompress = decompress


class HTTP1Connection(httputil.HTTPConnection):
    """Implements the HTTP/1.x protocol.

    This class can be used on its own for clients, or via
    `HTTP1ServerConnection` for servers.
    """
    def __init__(self, stream, is_client, params=None, context=None):
        """
        :arg stream: an `.IOStream`
        :arg bool is_client: client or server
        :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
        :arg context: an opaque application-defined object that can be
            accessed as ``connection.context``.
        """
        self.is_client = is_client
        self.stream = stream
        if params is None:
            params = HTTP1ConnectionParameters()
        self.params = params
        self.context = context
        self.no_keep_alive = params.no_keep_alive
        # The body limits can be altered by the delegate, so save them
        # here instead of just referencing self.params later.
        self._max_body_size = (self.params.max_body_size or
                               self.stream.max_buffer_size)
        self._body_timeout = self.params.body_timeout
        # _write_finished is set to True when finish() has been called,
        # i.e. there will be no more data sent. Data may still be in the
        # stream's write buffer.
        self._write_finished = False
        # True when we have read the entire incoming body.
        self._read_finished = False
        # _finish_future resolves when all data has been written and flushed
        # to the IOStream.
        self._finish_future = Future()
        # If true, the connection should be closed after this request
        # (after the response has been written in the server side,
        # and after it has been read in the client)
        self._disconnect_on_finish = False
        self._clear_callbacks()
        # Save the start lines after we read or write them; they
        # affect later processing (e.g. 304 responses and HEAD methods
        # have content-length but no bodies)
        self._request_start_line = None
        self._response_start_line = None
        self._request_headers = None
        # True if we are writing output with chunked encoding.
        self._chunking_output = None
        # While reading a body with a content-length, this is the
        # amount left to read.
        self._expected_content_remaining = None
        # A Future for our outgoing writes, returned by IOStream.write.
        self._pending_write = None

    def read_response(self, delegate):
        """Read a single HTTP response.

        Typical client-mode usage is to write a request using
        `write_headers`, `write`, and `finish`, and then call
        ``read_response``.
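
        A minimal client-mode sketch (``stream`` is assumed to be an
        already-connected `.IOStream`, and ``delegate`` an
        `.HTTPMessageDelegate` implementation; run inside a coroutine)::

            conn = HTTP1Connection(stream, is_client=True)
            yield conn.write_headers(
                httputil.RequestStartLine('GET', '/', 'HTTP/1.1'),
                httputil.HTTPHeaders({'Host': 'example.com'}))
            conn.finish()
            yield conn.read_response(delegate)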
:arg delegate: a `.HTTPMessageDelegate` Returns a `.Future` that resolves to None after the full response has been read. """ if self.params.decompress: delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) return self._read_message(delegate) @gen.coroutine def _read_message(self, delegate): need_delegate_close = False try: header_future = self.stream.read_until_regex( b"\r?\n\r?\n", max_bytes=self.params.max_header_size) if self.params.header_timeout is None: header_data = yield header_future else: try: header_data = yield gen.with_timeout( self.stream.io_loop.time() + self.params.header_timeout, header_future, io_loop=self.stream.io_loop, quiet_exceptions=iostream.StreamClosedError) except gen.TimeoutError: self.close() raise gen.Return(False) start_line, headers = self._parse_headers(header_data) if self.is_client: start_line = httputil.parse_response_start_line(start_line) self._response_start_line = start_line else: start_line = httputil.parse_request_start_line(start_line) self._request_start_line = start_line self._request_headers = headers self._disconnect_on_finish = not self._can_keep_alive( start_line, headers) need_delegate_close = True with _ExceptionLoggingContext(app_log): header_future = delegate.headers_received(start_line, headers) if header_future is not None: yield header_future if self.stream is None: # We've been detached. need_delegate_close = False raise gen.Return(False) skip_body = False if self.is_client: if (self._request_start_line is not None and self._request_start_line.method == 'HEAD'): skip_body = True code = start_line.code if code == 304: # 304 responses may include the content-length header # but do not actually have a body. # http://tools.ietf.org/html/rfc7230#section-3.3 skip_body = True if code >= 100 and code < 200: # 1xx responses should never indicate the presence of # a body. if ('Content-Length' in headers or 'Transfer-Encoding' in headers): raise httputil.HTTPInputError( "Response code %d cannot have body" % code) # TODO: client delegates will get headers_received twice # in the case of a 100-continue. Document or change? 
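                    # A 1xx response is only a preamble; recurse to read
                    # the real response that follows it on the same stream.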
yield self._read_message(delegate) else: if (headers.get("Expect") == "100-continue" and not self._write_finished): self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n") if not skip_body: body_future = self._read_body( start_line.code if self.is_client else 0, headers, delegate) if body_future is not None: if self._body_timeout is None: yield body_future else: try: yield gen.with_timeout( self.stream.io_loop.time() + self._body_timeout, body_future, self.stream.io_loop, quiet_exceptions=iostream.StreamClosedError) except gen.TimeoutError: gen_log.info("Timeout reading body from %s", self.context) self.stream.close() raise gen.Return(False) self._read_finished = True if not self._write_finished or self.is_client: need_delegate_close = False with _ExceptionLoggingContext(app_log): delegate.finish() # If we're waiting for the application to produce an asynchronous # response, and we're not detached, register a close callback # on the stream (we didn't need one while we were reading) if (not self._finish_future.done() and self.stream is not None and not self.stream.closed()): self.stream.set_close_callback(self._on_connection_close) yield self._finish_future if self.is_client and self._disconnect_on_finish: self.close() if self.stream is None: raise gen.Return(False) except httputil.HTTPInputError as e: gen_log.info("Malformed HTTP message from %s: %s", self.context, e) self.close() raise gen.Return(False) finally: if need_delegate_close: with _ExceptionLoggingContext(app_log): delegate.on_connection_close() header_future = None self._clear_callbacks() raise gen.Return(True) def _clear_callbacks(self): """Clears the callback attributes. This allows the request handler to be garbage collected more quickly in CPython by breaking up reference cycles. """ self._write_callback = None self._write_future = None self._close_callback = None if self.stream is not None: self.stream.set_close_callback(None) def set_close_callback(self, callback): """Sets a callback that will be run when the connection is closed. .. deprecated:: 4.0 Use `.HTTPMessageDelegate.on_connection_close` instead. """ self._close_callback = stack_context.wrap(callback) def _on_connection_close(self): # Note that this callback is only registered on the IOStream # when we have finished reading the request and are waiting for # the application to produce its response. if self._close_callback is not None: callback = self._close_callback self._close_callback = None callback() if not self._finish_future.done(): self._finish_future.set_result(None) self._clear_callbacks() def close(self): if self.stream is not None: self.stream.close() self._clear_callbacks() if not self._finish_future.done(): self._finish_future.set_result(None) def detach(self): """Take control of the underlying stream. Returns the underlying `.IOStream` object and stops all further HTTP processing. May only be called during `.HTTPMessageDelegate.headers_received`. Intended for implementing protocols like websockets that tunnel over an HTTP handshake. """ self._clear_callbacks() stream = self.stream self.stream = None if not self._finish_future.done(): self._finish_future.set_result(None) return stream def set_body_timeout(self, timeout): """Sets the body timeout for a single request. Overrides the value from `.HTTP1ConnectionParameters`. """ self._body_timeout = timeout def set_max_body_size(self, max_body_size): """Sets the body size limit for a single request. Overrides the value from `.HTTP1ConnectionParameters`. 
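
        For example, a streaming request handler that accepts very large
        uploads might raise the limit before the body is read (a sketch;
        the 1 GiB figure is illustrative)::

            self.request.connection.set_max_body_size(1 << 30)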
""" self._max_body_size = max_body_size def write_headers(self, start_line, headers, chunk=None, callback=None): """Implements `.HTTPConnection.write_headers`.""" lines = [] if self.is_client: self._request_start_line = start_line lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1]))) # Client requests with a non-empty body must have either a # Content-Length or a Transfer-Encoding. self._chunking_output = ( start_line.method in ('POST', 'PUT', 'PATCH') and 'Content-Length' not in headers and 'Transfer-Encoding' not in headers) else: self._response_start_line = start_line lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2]))) self._chunking_output = ( # TODO: should this use # self._request_start_line.version or # start_line.version? self._request_start_line.version == 'HTTP/1.1' and # 1xx, 204 and 304 responses have no body (not even a zero-length # body), and so should not have either Content-Length or # Transfer-Encoding headers. start_line.code not in (204, 304) and (start_line.code < 100 or start_line.code >= 200) and # No need to chunk the output if a Content-Length is specified. 'Content-Length' not in headers and # Applications are discouraged from touching Transfer-Encoding, # but if they do, leave it alone. 'Transfer-Encoding' not in headers) # If a 1.0 client asked for keep-alive, add the header. if (self._request_start_line.version == 'HTTP/1.0' and (self._request_headers.get('Connection', '').lower() == 'keep-alive')): headers['Connection'] = 'Keep-Alive' if self._chunking_output: headers['Transfer-Encoding'] = 'chunked' if (not self.is_client and (self._request_start_line.method == 'HEAD' or start_line.code == 304)): self._expected_content_remaining = 0 elif 'Content-Length' in headers: self._expected_content_remaining = int(headers['Content-Length']) else: self._expected_content_remaining = None # TODO: headers are supposed to be of type str, but we still have some # cases that let bytes slip through. Remove these native_str calls when those # are fixed. header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all()) if PY3: lines.extend(l.encode('latin1') for l in header_lines) else: lines.extend(header_lines) for line in lines: if b'\n' in line: raise ValueError('Newline in header: ' + repr(line)) future = None if self.stream.closed(): future = self._write_future = Future() future.set_exception(iostream.StreamClosedError()) future.exception() else: if callback is not None: self._write_callback = stack_context.wrap(callback) else: future = self._write_future = Future() data = b"\r\n".join(lines) + b"\r\n\r\n" if chunk: data += self._format_chunk(chunk) self._pending_write = self.stream.write(data) self._pending_write.add_done_callback(self._on_write_complete) return future def _format_chunk(self, chunk): if self._expected_content_remaining is not None: self._expected_content_remaining -= len(chunk) if self._expected_content_remaining < 0: # Close the stream now to stop further framing errors. self.stream.close() raise httputil.HTTPOutputError( "Tried to write more data than Content-Length") if self._chunking_output and chunk: # Don't write out empty chunks because that means END-OF-STREAM # with chunked encoding return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n" else: return chunk def write(self, chunk, callback=None): """Implements `.HTTPConnection.write`. For backwards compatibility is is allowed but deprecated to skip `write_headers` and instead call `write()` with a pre-encoded header block. 
""" future = None if self.stream.closed(): future = self._write_future = Future() self._write_future.set_exception(iostream.StreamClosedError()) self._write_future.exception() else: if callback is not None: self._write_callback = stack_context.wrap(callback) else: future = self._write_future = Future() self._pending_write = self.stream.write(self._format_chunk(chunk)) self._pending_write.add_done_callback(self._on_write_complete) return future def finish(self): """Implements `.HTTPConnection.finish`.""" if (self._expected_content_remaining is not None and self._expected_content_remaining != 0 and not self.stream.closed()): self.stream.close() raise httputil.HTTPOutputError( "Tried to write %d bytes less than Content-Length" % self._expected_content_remaining) if self._chunking_output: if not self.stream.closed(): self._pending_write = self.stream.write(b"0\r\n\r\n") self._pending_write.add_done_callback(self._on_write_complete) self._write_finished = True # If the app finished the request while we're still reading, # divert any remaining data away from the delegate and # close the connection when we're done sending our response. # Closing the connection is the only way to avoid reading the # whole input body. if not self._read_finished: self._disconnect_on_finish = True # No more data is coming, so instruct TCP to send any remaining # data immediately instead of waiting for a full packet or ack. self.stream.set_nodelay(True) if self._pending_write is None: self._finish_request(None) else: self._pending_write.add_done_callback(self._finish_request) def _on_write_complete(self, future): exc = future.exception() if exc is not None and not isinstance(exc, iostream.StreamClosedError): future.result() if self._write_callback is not None: callback = self._write_callback self._write_callback = None self.stream.io_loop.add_callback(callback) if self._write_future is not None: future = self._write_future self._write_future = None future.set_result(None) def _can_keep_alive(self, start_line, headers): if self.params.no_keep_alive: return False connection_header = headers.get("Connection") if connection_header is not None: connection_header = connection_header.lower() if start_line.version == "HTTP/1.1": return connection_header != "close" elif ("Content-Length" in headers or headers.get("Transfer-Encoding", "").lower() == "chunked" or getattr(start_line, 'method', None) in ("HEAD", "GET")): # start_line may be a request or response start line; only # the former has a method attribute. return connection_header == "keep-alive" return False def _finish_request(self, future): self._clear_callbacks() if not self.is_client and self._disconnect_on_finish: self.close() return # Turn Nagle's algorithm back on, leaving the stream in its # default state for the next request. self.stream.set_nodelay(False) if not self._finish_future.done(): self._finish_future.set_result(None) def _parse_headers(self, data): # The lstrip removes newlines that some implementations sometimes # insert between messages of a reused connection. Per RFC 7230, # we SHOULD ignore at least one empty line before the request. # http://tools.ietf.org/html/rfc7230#section-3.5 data = native_str(data.decode('latin1')).lstrip("\r\n") # RFC 7230 section allows for both CRLF and bare LF. 
        eol = data.find("\n")
        start_line = data[:eol].rstrip("\r")
        try:
            headers = httputil.HTTPHeaders.parse(data[eol:])
        except ValueError:
            # probably from split() if there was no ':' in the line
            raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
                                          data[eol:100])
        return start_line, headers

    def _read_body(self, code, headers, delegate):
        if "Content-Length" in headers:
            if "Transfer-Encoding" in headers:
                # Response cannot contain both Content-Length and
                # Transfer-Encoding headers.
                # http://tools.ietf.org/html/rfc7230#section-3.3.3
                raise httputil.HTTPInputError(
                    "Response with both Transfer-Encoding and Content-Length")
            if "," in headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated. If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise httputil.HTTPInputError(
                        "Multiple unequal Content-Lengths: %r" %
                        headers["Content-Length"])
                headers["Content-Length"] = pieces[0]

            try:
                content_length = int(headers["Content-Length"])
            except ValueError:
                # Handles non-integer Content-Length value.
                raise httputil.HTTPInputError(
                    "Only integer Content-Length is allowed: %s" %
                    headers["Content-Length"])

            if content_length > self._max_body_size:
                raise httputil.HTTPInputError("Content-Length too long")
        else:
            content_length = None

        if code == 204:
            # This response code is not allowed to have a non-empty body,
            # and has an implicit length of zero instead of read-until-close.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in headers or
                    content_length not in (None, 0)):
                raise httputil.HTTPInputError(
                    "Response with code %d should not have body" % code)
            content_length = 0

        if content_length is not None:
            return self._read_fixed_body(content_length, delegate)
        if headers.get("Transfer-Encoding", "").lower() == "chunked":
            return self._read_chunked_body(delegate)
        if self.is_client:
            return self._read_body_until_close(delegate)
        return None

    @gen.coroutine
    def _read_fixed_body(self, content_length, delegate):
        while content_length > 0:
            body = yield self.stream.read_bytes(
                min(self.params.chunk_size, content_length), partial=True)
            content_length -= len(body)
            if not self._write_finished or self.is_client:
                with _ExceptionLoggingContext(app_log):
                    ret = delegate.data_received(body)
                    if ret is not None:
                        yield ret

    @gen.coroutine
    def _read_chunked_body(self, delegate):
        # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
        total_size = 0
        while True:
            chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
            chunk_len = int(chunk_len.strip(), 16)
            if chunk_len == 0:
                crlf = yield self.stream.read_bytes(2)
                if crlf != b'\r\n':
                    raise httputil.HTTPInputError("improperly terminated chunked request")
                return
            total_size += chunk_len
            if total_size > self._max_body_size:
                raise httputil.HTTPInputError("chunked body too large")
            bytes_to_read = chunk_len
            while bytes_to_read:
                chunk = yield self.stream.read_bytes(
                    min(bytes_to_read, self.params.chunk_size), partial=True)
                bytes_to_read -= len(chunk)
                if not self._write_finished or self.is_client:
                    with _ExceptionLoggingContext(app_log):
                        ret = delegate.data_received(chunk)
                        if ret is not None:
                            yield ret
            # chunk ends with \r\n
            crlf = yield self.stream.read_bytes(2)
            assert crlf == b"\r\n"

    @gen.coroutine
    def _read_body_until_close(self, delegate):
        body = yield self.stream.read_until_close()
        if not self._write_finished or self.is_client:
            with _ExceptionLoggingContext(app_log):
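                # A close-delimited body arrives as a single chunk, so
                # the delegate gets one data_received call with everything.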
delegate.data_received(body) class _GzipMessageDelegate(httputil.HTTPMessageDelegate): """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``. """ def __init__(self, delegate, chunk_size): self._delegate = delegate self._chunk_size = chunk_size self._decompressor = None def headers_received(self, start_line, headers): if headers.get("Content-Encoding") == "gzip": self._decompressor = GzipDecompressor() # Downstream delegates will only see uncompressed data, # so rename the content-encoding header. # (but note that curl_httpclient doesn't do this). headers.add("X-Consumed-Content-Encoding", headers["Content-Encoding"]) del headers["Content-Encoding"] return self._delegate.headers_received(start_line, headers) @gen.coroutine def data_received(self, chunk): if self._decompressor: compressed_data = chunk while compressed_data: decompressed = self._decompressor.decompress( compressed_data, self._chunk_size) if decompressed: ret = self._delegate.data_received(decompressed) if ret is not None: yield ret compressed_data = self._decompressor.unconsumed_tail else: ret = self._delegate.data_received(chunk) if ret is not None: yield ret def finish(self): if self._decompressor is not None: tail = self._decompressor.flush() if tail: # I believe the tail will always be empty (i.e. # decompress will return all it can). The purpose # of the flush call is to detect errors such # as truncated input. But in case it ever returns # anything, treat it as an extra chunk self._delegate.data_received(tail) return self._delegate.finish() def on_connection_close(self): return self._delegate.on_connection_close() class HTTP1ServerConnection(object): """An HTTP/1.x server.""" def __init__(self, stream, params=None, context=None): """ :arg stream: an `.IOStream` :arg params: a `.HTTP1ConnectionParameters` or None :arg context: an opaque application-defined object that is accessible as ``connection.context`` """ self.stream = stream if params is None: params = HTTP1ConnectionParameters() self.params = params self.context = context self._serving_future = None @gen.coroutine def close(self): """Closes the connection. Returns a `.Future` that resolves after the serving loop has exited. """ self.stream.close() # Block until the serving loop is done, but ignore any exceptions # (start_serving is already responsible for logging them). try: yield self._serving_future except Exception: pass def start_serving(self, delegate): """Starts serving requests on this connection. :arg delegate: a `.HTTPServerConnectionDelegate` """ assert isinstance(delegate, httputil.HTTPServerConnectionDelegate) self._serving_future = self._server_request_loop(delegate) # Register the future on the IOLoop so its errors get logged. self.stream.io_loop.add_future(self._serving_future, lambda f: f.result()) @gen.coroutine def _server_request_loop(self, delegate): try: while True: conn = HTTP1Connection(self.stream, False, self.params, self.context) request_delegate = delegate.start_request(self, conn) try: ret = yield conn.read_response(request_delegate) except (iostream.StreamClosedError, iostream.UnsatisfiableReadError): return except _QuietException: # This exception was already logged. conn.close() return except Exception: gen_log.error("Uncaught exception", exc_info=True) conn.close() return if not ret: return yield gen.moment finally: delegate.on_close(self) tornado-4.5.3/tornado/httpclient.py000066400000000000000000000656771322420601000174020ustar00rootroot00000000000000"""Blocking and non-blocking HTTP client interfaces. 
This module defines a common interface shared by two implementations, ``simple_httpclient`` and ``curl_httpclient``. Applications may either instantiate their chosen implementation class directly or use the `AsyncHTTPClient` class from this module, which selects an implementation that can be overridden with the `AsyncHTTPClient.configure` method. The default implementation is ``simple_httpclient``, and this is expected to be suitable for most users' needs. However, some applications may wish to switch to ``curl_httpclient`` for reasons such as the following: * ``curl_httpclient`` has some features not found in ``simple_httpclient``, including support for HTTP proxies and the ability to use a specified network interface. * ``curl_httpclient`` is more likely to be compatible with sites that are not-quite-compliant with the HTTP spec, or sites that use little-exercised features of HTTP. * ``curl_httpclient`` is faster. * ``curl_httpclient`` was the default prior to Tornado 2.0. Note that if you are using ``curl_httpclient``, it is highly recommended that you use a recent version of ``libcurl`` and ``pycurl``. Currently the minimum supported version of libcurl is 7.22.0, and the minimum version of pycurl is 7.18.2. It is highly recommended that your ``libcurl`` installation is built with asynchronous DNS resolver (threaded or c-ares), otherwise you may encounter various problems with request timeouts (for more information, see http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS and comments in curl_httpclient.py). To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ from __future__ import absolute_import, division, print_function import functools import time import weakref from tornado.concurrent import TracebackFuture from tornado.escape import utf8, native_str from tornado import httputil, stack_context from tornado.ioloop import IOLoop from tornado.util import Configurable class HTTPClient(object): """A blocking HTTP client. This interface is provided for convenience and testing; most applications that are running an IOLoop will want to use `AsyncHTTPClient` instead. Typical usage looks like this:: http_client = httpclient.HTTPClient() try: response = http_client.fetch("http://www.google.com/") print(response.body) except httpclient.HTTPError as e: # HTTPError is raised for non-200 responses; the response # can be found in e.response. print("Error: " + str(e)) except Exception as e: # Other errors are possible, such as IOError. print("Error: " + str(e)) http_client.close() """ def __init__(self, async_client_class=None, **kwargs): self._io_loop = IOLoop(make_current=False) if async_client_class is None: async_client_class = AsyncHTTPClient self._async_client = async_client_class(self._io_loop, **kwargs) self._closed = False def __del__(self): self.close() def close(self): """Closes the HTTPClient, freeing any resources used.""" if not self._closed: self._async_client.close() self._io_loop.close() self._closed = True def fetch(self, request, **kwargs): """Executes a request, returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` If an error occurs during the fetch, we raise an `HTTPError` unless the ``raise_error`` keyword argument is set to False. 
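
        For example, given an ``http_client`` instance, a non-200
        response can be inspected without exception handling (a sketch;
        the URL is illustrative)::

            response = http_client.fetch("http://example.com/missing",
                                         raise_error=False)
            print(response.code)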
""" response = self._io_loop.run_sync(functools.partial( self._async_client.fetch, request, **kwargs)) return response class AsyncHTTPClient(Configurable): """An non-blocking HTTP client. Example usage:: def handle_response(response): if response.error: print("Error: %s" % response.error) else: print(response.body) http_client = AsyncHTTPClient() http_client.fetch("http://www.google.com/", handle_response) The constructor for this class is magic in several respects: It actually creates an instance of an implementation-specific subclass, and instances are reused as a kind of pseudo-singleton (one per `.IOLoop`). The keyword argument ``force_instance=True`` can be used to suppress this singleton behavior. Unless ``force_instance=True`` is used, no arguments other than ``io_loop`` should be passed to the `AsyncHTTPClient` constructor. The implementation subclass as well as arguments to its constructor can be set with the static method `configure()` All `AsyncHTTPClient` implementations support a ``defaults`` keyword argument, which can be used to set default values for `HTTPRequest` attributes. For example:: AsyncHTTPClient.configure( None, defaults=dict(user_agent="MyUserAgent")) # or with force_instance: client = AsyncHTTPClient(force_instance=True, defaults=dict(user_agent="MyUserAgent")) .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ @classmethod def configurable_base(cls): return AsyncHTTPClient @classmethod def configurable_default(cls): from tornado.simple_httpclient import SimpleAsyncHTTPClient return SimpleAsyncHTTPClient @classmethod def _async_clients(cls): attr_name = '_async_client_dict_' + cls.__name__ if not hasattr(cls, attr_name): setattr(cls, attr_name, weakref.WeakKeyDictionary()) return getattr(cls, attr_name) def __new__(cls, io_loop=None, force_instance=False, **kwargs): io_loop = io_loop or IOLoop.current() if force_instance: instance_cache = None else: instance_cache = cls._async_clients() if instance_cache is not None and io_loop in instance_cache: return instance_cache[io_loop] instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop, **kwargs) # Make sure the instance knows which cache to remove itself from. # It can't simply call _async_clients() because we may be in # __new__(AsyncHTTPClient) but instance.__class__ may be # SimpleAsyncHTTPClient. instance._instance_cache = instance_cache if instance_cache is not None: instance_cache[instance.io_loop] = instance return instance def initialize(self, io_loop, defaults=None): self.io_loop = io_loop self.defaults = dict(HTTPRequest._DEFAULTS) if defaults is not None: self.defaults.update(defaults) self._closed = False def close(self): """Destroys this HTTP client, freeing any file descriptors used. This method is **not needed in normal use** due to the way that `AsyncHTTPClient` objects are transparently reused. ``close()`` is generally only necessary when either the `.IOLoop` is also being closed, or the ``force_instance=True`` argument was used when creating the `AsyncHTTPClient`. No other methods may be called on the `AsyncHTTPClient` after ``close()``. """ if self._closed: return self._closed = True if self._instance_cache is not None: if self._instance_cache.get(self.io_loop) is not self: raise RuntimeError("inconsistent AsyncHTTPClient cache") del self._instance_cache[self.io_loop] def fetch(self, request, callback=None, raise_error=True, **kwargs): """Executes a request, asynchronously returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. 
If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` This method returns a `.Future` whose result is an `HTTPResponse`. By default, the ``Future`` will raise an `HTTPError` if the request returned a non-200 response code (other errors may also be raised if the server could not be contacted). Instead, if ``raise_error`` is set to False, the response will always be returned regardless of the response code. If a ``callback`` is given, it will be invoked with the `HTTPResponse`. In the callback interface, `HTTPError` is not automatically raised. Instead, you must check the response's ``error`` attribute or call its `~HTTPResponse.rethrow` method. """ if self._closed: raise RuntimeError("fetch() called on closed AsyncHTTPClient") if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) else: if kwargs: raise ValueError("kwargs can't be used if request is an HTTPRequest object") # We may modify this (to add Host, Accept-Encoding, etc), # so make sure we don't modify the caller's object. This is also # where normal dicts get converted to HTTPHeaders objects. request.headers = httputil.HTTPHeaders(request.headers) request = _RequestProxy(request, self.defaults) future = TracebackFuture() if callback is not None: callback = stack_context.wrap(callback) def handle_future(future): exc = future.exception() if isinstance(exc, HTTPError) and exc.response is not None: response = exc.response elif exc is not None: response = HTTPResponse( request, 599, error=exc, request_time=time.time() - request.start_time) else: response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) def handle_response(response): if raise_error and response.error: future.set_exception(response.error) else: future.set_result(response) self.fetch_impl(request, handle_response) return future def fetch_impl(self, request, callback): raise NotImplementedError() @classmethod def configure(cls, impl, **kwargs): """Configures the `AsyncHTTPClient` subclass to use. ``AsyncHTTPClient()`` actually creates an instance of a subclass. This method may be called with either a class object or the fully-qualified name of such a class (or ``None`` to use the default, ``SimpleAsyncHTTPClient``) If additional keyword arguments are given, they will be passed to the constructor of each subclass instance created. The keyword argument ``max_clients`` determines the maximum number of simultaneous `~AsyncHTTPClient.fetch()` operations that can execute in parallel on each `.IOLoop`. Additional arguments may be supported depending on the implementation class in use. Example:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ super(AsyncHTTPClient, cls).configure(impl, **kwargs) class HTTPRequest(object): """HTTP client request object.""" # Default values for HTTPRequest parameters. # Merged with the values on the request object by AsyncHTTPClient # implementations. 
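    # (The merge happens through _RequestProxy near the bottom of this
    # module: any attribute left as None on the request falls back to
    # the value given here.)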
_DEFAULTS = dict( connect_timeout=20.0, request_timeout=20.0, follow_redirects=True, max_redirects=5, decompress_response=True, proxy_password='', allow_nonstandard_methods=False, validate_cert=True) def __init__(self, url, method="GET", headers=None, body=None, auth_username=None, auth_password=None, auth_mode=None, connect_timeout=None, request_timeout=None, if_modified_since=None, follow_redirects=None, max_redirects=None, user_agent=None, use_gzip=None, network_interface=None, streaming_callback=None, header_callback=None, prepare_curl_callback=None, proxy_host=None, proxy_port=None, proxy_username=None, proxy_password=None, proxy_auth_mode=None, allow_nonstandard_methods=None, validate_cert=None, ca_certs=None, allow_ipv6=None, client_key=None, client_cert=None, body_producer=None, expect_100_continue=False, decompress_response=None, ssl_options=None): r"""All parameters except ``url`` are optional. :arg string url: URL to fetch :arg string method: HTTP method, e.g. "GET" or "POST" :arg headers: Additional HTTP headers to pass on the request :type headers: `~tornado.httputil.HTTPHeaders` or `dict` :arg body: HTTP request body as a string (byte or unicode; if unicode the utf-8 encoding will be used) :arg body_producer: Callable used for lazy/asynchronous request bodies. It is called with one argument, a ``write`` function, and should return a `.Future`. It should call the write function with new data as it becomes available. The write function returns a `.Future` which can be used for flow control. Only one of ``body`` and ``body_producer`` may be specified. ``body_producer`` is not supported on ``curl_httpclient``. When using ``body_producer`` it is recommended to pass a ``Content-Length`` in the headers as otherwise chunked encoding will be used, and many servers do not support chunked encoding on requests. New in Tornado 4.0 :arg string auth_username: Username for HTTP authentication :arg string auth_password: Password for HTTP authentication :arg string auth_mode: Authentication mode; default is "basic". Allowed values are implementation-defined; ``curl_httpclient`` supports "basic" and "digest"; ``simple_httpclient`` only supports "basic" :arg float connect_timeout: Timeout for initial connection in seconds, default 20 seconds :arg float request_timeout: Timeout for entire request in seconds, default 20 seconds :arg if_modified_since: Timestamp for ``If-Modified-Since`` header :type if_modified_since: `datetime` or `float` :arg bool follow_redirects: Should redirects be followed automatically or return the 3xx response? Default True. :arg int max_redirects: Limit for ``follow_redirects``, default 5. :arg string user_agent: String to send as ``User-Agent`` header :arg bool decompress_response: Request a compressed response from the server and decompress it after downloading. Default is True. New in Tornado 4.0. :arg bool use_gzip: Deprecated alias for ``decompress_response`` since Tornado 4.0. :arg string network_interface: Network interface to use for request. ``curl_httpclient`` only; see note below. :arg callable streaming_callback: If set, ``streaming_callback`` will be run with each chunk of data as it is received, and ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in the final response. :arg callable header_callback: If set, ``header_callback`` will be run with each header line as it is received (including the first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line containing only ``\r\n``. All lines include the trailing newline characters). 
            ``HTTPResponse.headers`` will be empty in the final response.
            This is most useful in conjunction with ``streaming_callback``,
            because it's the only way to get access to header data while the
            request is in progress.
        :arg callable prepare_curl_callback: If set, will be called with
            a ``pycurl.Curl`` object to allow the application to make
            additional ``setopt`` calls.
        :arg string proxy_host: HTTP proxy hostname. To use proxies,
            ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
            ``proxy_password`` and ``proxy_auth_mode`` are optional. Proxies
            are currently only supported with ``curl_httpclient``.
        :arg int proxy_port: HTTP proxy port
        :arg string proxy_username: HTTP proxy username
        :arg string proxy_password: HTTP proxy password
        :arg string proxy_auth_mode: HTTP proxy authentication mode;
            default is "basic". Supports "basic" and "digest".
        :arg bool allow_nonstandard_methods: Allow unknown values for
            ``method`` argument? Default is False.
        :arg bool validate_cert: For HTTPS requests, validate the server's
            certificate? Default is True.
        :arg string ca_certs: filename of CA certificates in PEM format,
            or None to use defaults. See note below when used with
            ``curl_httpclient``.
        :arg string client_key: Filename for client SSL key, if any. See
            note below when used with ``curl_httpclient``.
        :arg string client_cert: Filename for client SSL certificate, if any.
            See note below when used with ``curl_httpclient``.
        :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
            ``simple_httpclient`` (unsupported by ``curl_httpclient``).
            Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
            and ``client_cert``.
        :arg bool allow_ipv6: Use IPv6 when available? Default is true.
        :arg bool expect_100_continue: If true, send the
            ``Expect: 100-continue`` header and wait for a continue response
            before sending the request body. Only supported with
            simple_httpclient.

        .. note::

            When using ``curl_httpclient`` certain options may be
            inherited by subsequent fetches because ``pycurl`` does
            not allow them to be cleanly reset. This applies to the
            ``ca_certs``, ``client_key``, ``client_cert``, and
            ``network_interface`` arguments. If you use these
            options, you should pass them on every request (you don't
            have to always use the same values, but it's not possible
            to mix requests that specify these options with ones that
            use the defaults).

        .. versionadded:: 3.1
           The ``auth_mode`` argument.

        .. versionadded:: 4.0
           The ``body_producer`` and ``expect_100_continue`` arguments.

        .. versionadded:: 4.2
           The ``ssl_options`` argument.

        .. versionadded:: 4.5
           The ``proxy_auth_mode`` argument.
        """
        # Note that some of these attributes go through property setters
        # defined below.
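        # (The setters coerce types, e.g. the body is encoded to utf-8
        # bytes, and wrap callbacks in stack_context so they run in the
        # caller's context.)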
self.headers = headers if if_modified_since: self.headers["If-Modified-Since"] = httputil.format_timestamp( if_modified_since) self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_username = proxy_username self.proxy_password = proxy_password self.proxy_auth_mode = proxy_auth_mode self.url = url self.method = method self.body = body self.body_producer = body_producer self.auth_username = auth_username self.auth_password = auth_password self.auth_mode = auth_mode self.connect_timeout = connect_timeout self.request_timeout = request_timeout self.follow_redirects = follow_redirects self.max_redirects = max_redirects self.user_agent = user_agent if decompress_response is not None: self.decompress_response = decompress_response else: self.decompress_response = use_gzip self.network_interface = network_interface self.streaming_callback = streaming_callback self.header_callback = header_callback self.prepare_curl_callback = prepare_curl_callback self.allow_nonstandard_methods = allow_nonstandard_methods self.validate_cert = validate_cert self.ca_certs = ca_certs self.allow_ipv6 = allow_ipv6 self.client_key = client_key self.client_cert = client_cert self.ssl_options = ssl_options self.expect_100_continue = expect_100_continue self.start_time = time.time() @property def headers(self): return self._headers @headers.setter def headers(self, value): if value is None: self._headers = httputil.HTTPHeaders() else: self._headers = value @property def body(self): return self._body @body.setter def body(self, value): self._body = utf8(value) @property def body_producer(self): return self._body_producer @body_producer.setter def body_producer(self, value): self._body_producer = stack_context.wrap(value) @property def streaming_callback(self): return self._streaming_callback @streaming_callback.setter def streaming_callback(self, value): self._streaming_callback = stack_context.wrap(value) @property def header_callback(self): return self._header_callback @header_callback.setter def header_callback(self, value): self._header_callback = stack_context.wrap(value) @property def prepare_curl_callback(self): return self._prepare_curl_callback @prepare_curl_callback.setter def prepare_curl_callback(self, value): self._prepare_curl_callback = stack_context.wrap(value) class HTTPResponse(object): """HTTP Response object. Attributes: * request: HTTPRequest object * code: numeric HTTP status code, e.g. 200 or 404 * reason: human-readable reason phrase describing the status code * headers: `tornado.httputil.HTTPHeaders` object * effective_url: final location of the resource after following any redirects * buffer: ``cStringIO`` object for response body * body: response body as bytes (created on demand from ``self.buffer``) * error: Exception object, if any * request_time: seconds from request start to finish * time_info: dictionary of diagnostic timing information from the request. Available data are subject to change, but currently uses timings available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html, plus ``queue``, which is the delay (if any) introduced by waiting for a slot under `AsyncHTTPClient`'s ``max_clients`` setting. 
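
    A sketch of reading these attributes (``response`` is assumed to
    come from a completed ``fetch``)::

        print(response.code, response.reason)
        print(response.headers.get("Content-Type"))
        print(response.request_time)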
""" def __init__(self, request, code, headers=None, buffer=None, effective_url=None, error=None, request_time=None, time_info=None, reason=None): if isinstance(request, _RequestProxy): self.request = request.request else: self.request = request self.code = code self.reason = reason or httputil.responses.get(code, "Unknown") if headers is not None: self.headers = headers else: self.headers = httputil.HTTPHeaders() self.buffer = buffer self._body = None if effective_url is None: self.effective_url = request.url else: self.effective_url = effective_url if error is None: if self.code < 200 or self.code >= 300: self.error = HTTPError(self.code, message=self.reason, response=self) else: self.error = None else: self.error = error self.request_time = request_time self.time_info = time_info or {} @property def body(self): if self.buffer is None: return None elif self._body is None: self._body = self.buffer.getvalue() return self._body def rethrow(self): """If there was an error on the request, raise an `HTTPError`.""" if self.error: raise self.error def __repr__(self): args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items())) return "%s(%s)" % (self.__class__.__name__, args) class HTTPError(Exception): """Exception thrown for an unsuccessful HTTP request. Attributes: * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is used when no HTTP response was received, e.g. for a timeout. * ``response`` - `HTTPResponse` object, if any. Note that if ``follow_redirects`` is False, redirects become HTTPErrors, and you can look at ``error.response.headers['Location']`` to see the destination of the redirect. """ def __init__(self, code, message=None, response=None): self.code = code self.message = message or httputil.responses.get(code, "Unknown") self.response = response super(HTTPError, self).__init__(code, message, response) def __str__(self): return "HTTP %d: %s" % (self.code, self.message) # There is a cyclic reference between self and self.response, # which breaks the default __repr__ implementation. # (especially on pypy, which doesn't have the same recursion # detection as cpython). __repr__ = __str__ class _RequestProxy(object): """Combines an object with a dictionary of defaults. Used internally by AsyncHTTPClient implementations. 
""" def __init__(self, request, defaults): self.request = request self.defaults = defaults def __getattr__(self, name): request_attr = getattr(self.request, name) if request_attr is not None: return request_attr elif self.defaults is not None: return self.defaults.get(name, None) else: return None def main(): from tornado.options import define, options, parse_command_line define("print_headers", type=bool, default=False) define("print_body", type=bool, default=True) define("follow_redirects", type=bool, default=True) define("validate_cert", type=bool, default=True) args = parse_command_line() client = HTTPClient() for arg in args: try: response = client.fetch(arg, follow_redirects=options.follow_redirects, validate_cert=options.validate_cert, ) except HTTPError as e: if e.response is not None: response = e.response else: raise if options.print_headers: print(response.headers) if options.print_body: print(native_str(response.body)) client.close() if __name__ == "__main__": main() tornado-4.5.3/tornado/httpserver.py000066400000000000000000000307441322420601000174150ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking, single-threaded HTTP server. Typical applications have little direct interaction with the `HTTPServer` class except to start a server at the beginning of the process (and even that is often done indirectly via `tornado.web.Application.listen`). .. versionchanged:: 4.0 The ``HTTPRequest`` class that used to live in this module has been moved to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. """ from __future__ import absolute_import, division, print_function import socket from tornado.escape import native_str from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters from tornado import gen from tornado import httputil from tornado import iostream from tornado import netutil from tornado.tcpserver import TCPServer from tornado.util import Configurable class HTTPServer(TCPServer, Configurable, httputil.HTTPServerConnectionDelegate): r"""A non-blocking, single-threaded HTTP server. A server is defined by a subclass of `.HTTPServerConnectionDelegate`, or, for backwards compatibility, a callback that takes an `.HTTPServerRequest` as an argument. The delegate is usually a `tornado.web.Application`. `HTTPServer` supports keep-alive connections by default (automatically for HTTP/1.1, or for HTTP/1.0 when the client requests ``Connection: keep-alive``). If ``xheaders`` is ``True``, we support the ``X-Real-Ip``/``X-Forwarded-For`` and ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the remote IP and URI scheme/protocol for all requests. These headers are useful when running Tornado behind a reverse proxy or load balancer. The ``protocol`` argument can also be set to ``https`` if Tornado is run behind an SSL-decoding proxy that does not set one of the supported ``xheaders``. 
    By default, when parsing the ``X-Forwarded-For`` header, Tornado will
    select the last (i.e., the closest) address on the list of hosts as the
    remote host IP address.  To select the next server in the chain, a list of
    trusted downstream hosts may be passed as the ``trusted_downstream``
    argument.  These hosts will be skipped when parsing the
    ``X-Forwarded-For`` header.

    To make this server serve SSL traffic, pass the ``ssl_options`` keyword
    argument with an `ssl.SSLContext` object. For compatibility with older
    versions of Python ``ssl_options`` may also be a dictionary of keyword
    arguments for the `ssl.wrap_socket` method::

        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
                                os.path.join(data_dir, "mydomain.key"))
        HTTPServer(application, ssl_options=ssl_ctx)

    `HTTPServer` initialization follows one of three patterns (the
    initialization methods are defined on `tornado.tcpserver.TCPServer`):

    1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::

            server = HTTPServer(app)
            server.listen(8888)
            IOLoop.current().start()

       In many cases, `tornado.web.Application.listen` can be used to avoid
       the need to explicitly create the `HTTPServer`.

    2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
       simple multi-process::

            server = HTTPServer(app)
            server.bind(8888)
            server.start(0)  # Forks multiple sub-processes
            IOLoop.current().start()

       When using this interface, an `.IOLoop` must *not* be passed
       to the `HTTPServer` constructor.  `~.TCPServer.start` will always
       start the server on the default singleton `.IOLoop`.

    3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::

            sockets = tornado.netutil.bind_sockets(8888)
            tornado.process.fork_processes(0)
            server = HTTPServer(app)
            server.add_sockets(sockets)
            IOLoop.current().start()

       The `~.TCPServer.add_sockets` interface is more complicated,
       but it can be used with `tornado.process.fork_processes` to
       give you more flexibility in when the fork happens.
       `~.TCPServer.add_sockets` can also be used in single-process
       servers if you want to create your listening sockets in some
       way other than `tornado.netutil.bind_sockets`.

    .. versionchanged:: 4.0
       Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
       ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
       arguments.  Added support for `.HTTPServerConnectionDelegate`
       instances as ``request_callback``.

    .. versionchanged:: 4.1
       `.HTTPServerConnectionDelegate.start_request` is now called with
       two arguments ``(server_conn, request_conn)`` (in accordance with
       the documentation) instead of one ``(request_conn)``.

    .. versionchanged:: 4.2
       `HTTPServer` is now a subclass of `tornado.util.Configurable`.

    .. versionchanged:: 4.5
       Added the ``trusted_downstream`` argument.
    """
    def __init__(self, *args, **kwargs):
        # Ignore args to __init__; real initialization belongs in
        # initialize since we're Configurable.
(there's something # weird in initialization order between this class, # Configurable, and TCPServer so we can't leave __init__ out # completely) pass def initialize(self, request_callback, no_keep_alive=False, io_loop=None, xheaders=False, ssl_options=None, protocol=None, decompress_request=False, chunk_size=None, max_header_size=None, idle_connection_timeout=None, body_timeout=None, max_body_size=None, max_buffer_size=None, trusted_downstream=None): self.request_callback = request_callback self.no_keep_alive = no_keep_alive self.xheaders = xheaders self.protocol = protocol self.conn_params = HTTP1ConnectionParameters( decompress=decompress_request, chunk_size=chunk_size, max_header_size=max_header_size, header_timeout=idle_connection_timeout or 3600, max_body_size=max_body_size, body_timeout=body_timeout, no_keep_alive=no_keep_alive) TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options, max_buffer_size=max_buffer_size, read_chunk_size=chunk_size) self._connections = set() self.trusted_downstream = trusted_downstream @classmethod def configurable_base(cls): return HTTPServer @classmethod def configurable_default(cls): return HTTPServer @gen.coroutine def close_all_connections(self): while self._connections: # Peek at an arbitrary element of the set conn = next(iter(self._connections)) yield conn.close() def handle_stream(self, stream, address): context = _HTTPRequestContext(stream, address, self.protocol, self.trusted_downstream) conn = HTTP1ServerConnection( stream, self.conn_params, context) self._connections.add(conn) conn.start_serving(self) def start_request(self, server_conn, request_conn): if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate): delegate = self.request_callback.start_request(server_conn, request_conn) else: delegate = _CallableAdapter(self.request_callback, request_conn) if self.xheaders: delegate = _ProxyAdapter(delegate, request_conn) return delegate def on_close(self, server_conn): self._connections.remove(server_conn) class _CallableAdapter(httputil.HTTPMessageDelegate): def __init__(self, request_callback, request_conn): self.connection = request_conn self.request_callback = request_callback self.request = None self.delegate = None self._chunks = [] def headers_received(self, start_line, headers): self.request = httputil.HTTPServerRequest( connection=self.connection, start_line=start_line, headers=headers) def data_received(self, chunk): self._chunks.append(chunk) def finish(self): self.request.body = b''.join(self._chunks) self.request._parse_body() self.request_callback(self.request) def on_connection_close(self): self._chunks = None class _HTTPRequestContext(object): def __init__(self, stream, address, protocol, trusted_downstream=None): self.address = address # Save the socket's address family now so we know how to # interpret self.address even after the stream is closed # and its socket attribute replaced with None. if stream.socket is not None: self.address_family = stream.socket.family else: self.address_family = None # In HTTPServerRequest we want an IP, not a full socket address. if (self.address_family in (socket.AF_INET, socket.AF_INET6) and address is not None): self.remote_ip = address[0] else: # Unix (or other) socket; fake the remote address. 
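            # ('0.0.0.0' is just a placeholder: a unix-domain peer has no
            # IP address, but remote_ip is expected to be a string.)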
self.remote_ip = '0.0.0.0' if protocol: self.protocol = protocol elif isinstance(stream, iostream.SSLIOStream): self.protocol = "https" else: self.protocol = "http" self._orig_remote_ip = self.remote_ip self._orig_protocol = self.protocol self.trusted_downstream = set(trusted_downstream or []) def __str__(self): if self.address_family in (socket.AF_INET, socket.AF_INET6): return self.remote_ip elif isinstance(self.address, bytes): # Python 3 with the -bb option warns about str(bytes), # so convert it explicitly. # Unix socket addresses are str on mac but bytes on linux. return native_str(self.address) else: return str(self.address) def _apply_xheaders(self, headers): """Rewrite the ``remote_ip`` and ``protocol`` fields.""" # Squid uses X-Forwarded-For, others use X-Real-Ip ip = headers.get("X-Forwarded-For", self.remote_ip) # Skip trusted downstream hosts in X-Forwarded-For list for ip in (cand.strip() for cand in reversed(ip.split(','))): if ip not in self.trusted_downstream: break ip = headers.get("X-Real-Ip", ip) if netutil.is_valid_ip(ip): self.remote_ip = ip # AWS uses X-Forwarded-Proto proto_header = headers.get( "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)) if proto_header in ("http", "https"): self.protocol = proto_header def _unapply_xheaders(self): """Undo changes from `_apply_xheaders`. Xheaders are per-request so they should not leak to the next request on the same connection. """ self.remote_ip = self._orig_remote_ip self.protocol = self._orig_protocol class _ProxyAdapter(httputil.HTTPMessageDelegate): def __init__(self, delegate, request_conn): self.connection = request_conn self.delegate = delegate def headers_received(self, start_line, headers): self.connection.context._apply_xheaders(headers) return self.delegate.headers_received(start_line, headers) def data_received(self, chunk): return self.delegate.data_received(chunk) def finish(self): self.delegate.finish() self._cleanup() def on_connection_close(self): self.delegate.on_connection_close() self._cleanup() def _cleanup(self): self.connection.context._unapply_xheaders() HTTPRequest = httputil.HTTPServerRequest tornado-4.5.3/tornado/httputil.py000066400000000000000000001003611322420601000170550ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HTTP utility code shared by clients and servers. This module also defines the `HTTPServerRequest` class which is exposed via `tornado.web.RequestHandler.request`. 
""" from __future__ import absolute_import, division, print_function import calendar import collections import copy import datetime import email.utils import numbers import re import time from tornado.escape import native_str, parse_qs_bytes, utf8 from tornado.log import gen_log from tornado.util import ObjectDict, PY3 if PY3: import http.cookies as Cookie from http.client import responses from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl else: import Cookie from httplib import responses from urllib import urlencode from urlparse import urlparse, urlunparse, parse_qsl # responses is unused in this file, but we re-export it to other files. # Reference it so pyflakes doesn't complain. responses try: from ssl import SSLError except ImportError: # ssl is unavailable on app engine. class _SSLError(Exception): pass # Hack around a mypy limitation. We can't simply put "type: ignore" # on the class definition itself; must go through an assignment. SSLError = _SSLError # type: ignore try: import typing except ImportError: pass # RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line # terminator and ignore any preceding CR. _CRLF_RE = re.compile(r'\r?\n') class _NormalizedHeaderCache(dict): """Dynamic cached mapping of header names to Http-Header-Case. Implemented as a dict subclass so that cache hits are as fast as a normal dict lookup, without the overhead of a python function call. >>> normalized_headers = _NormalizedHeaderCache(10) >>> normalized_headers["coNtent-TYPE"] 'Content-Type' """ def __init__(self, size): super(_NormalizedHeaderCache, self).__init__() self.size = size self.queue = collections.deque() def __missing__(self, key): normalized = "-".join([w.capitalize() for w in key.split("-")]) self[key] = normalized self.queue.append(key) if len(self.queue) > self.size: # Limit the size of the cache. LRU would be better, but this # simpler approach should be fine. In Python 2.7+ we could # use OrderedDict (or in 3.2+, @functools.lru_cache). old_key = self.queue.popleft() del self[old_key] return normalized _normalized_headers = _NormalizedHeaderCache(1000) class HTTPHeaders(collections.MutableMapping): """A dictionary that maintains ``Http-Header-Case`` for all keys. Supports multiple values per key via a pair of new methods, `add()` and `get_list()`. The regular dictionary interface returns a single value per key, with multiple values joined by a comma. >>> h = HTTPHeaders({"content-type": "text/html"}) >>> list(h.keys()) ['Content-Type'] >>> h["Content-Type"] 'text/html' >>> h.add("Set-Cookie", "A=B") >>> h.add("Set-Cookie", "C=D") >>> h["set-cookie"] 'A=B,C=D' >>> h.get_list("set-cookie") ['A=B', 'C=D'] >>> for (k,v) in sorted(h.get_all()): ... print('%s: %s' % (k,v)) ... 
Content-Type: text/html Set-Cookie: A=B Set-Cookie: C=D """ def __init__(self, *args, **kwargs): self._dict = {} # type: typing.Dict[str, str] self._as_list = {} # type: typing.Dict[str, typing.List[str]] self._last_key = None if (len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], HTTPHeaders)): # Copy constructor for k, v in args[0].get_all(): self.add(k, v) else: # Dict-style initialization self.update(*args, **kwargs) # new public methods def add(self, name, value): # type: (str, str) -> None """Adds a new value for the given key.""" norm_name = _normalized_headers[name] self._last_key = norm_name if norm_name in self: self._dict[norm_name] = (native_str(self[norm_name]) + ',' + native_str(value)) self._as_list[norm_name].append(value) else: self[norm_name] = value def get_list(self, name): """Returns all values for the given header as a list.""" norm_name = _normalized_headers[name] return self._as_list.get(norm_name, []) def get_all(self): # type: () -> typing.Iterable[typing.Tuple[str, str]] """Returns an iterable of all (name, value) pairs. If a header has multiple values, multiple pairs will be returned with the same name. """ for name, values in self._as_list.items(): for value in values: yield (name, value) def parse_line(self, line): """Updates the dictionary with a single header line. >>> h = HTTPHeaders() >>> h.parse_line("Content-Type: text/html") >>> h.get('content-type') 'text/html' """ if line[0].isspace(): # continuation of a multi-line header new_part = ' ' + line.lstrip() self._as_list[self._last_key][-1] += new_part self._dict[self._last_key] += new_part else: name, value = line.split(":", 1) self.add(name, value.strip()) @classmethod def parse(cls, headers): """Returns a dictionary from HTTP header text. >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.items()) [('Content-Length', '42'), ('Content-Type', 'text/html')] """ h = cls() for line in _CRLF_RE.split(headers): if line: h.parse_line(line) return h # MutableMapping abstract method implementations. def __setitem__(self, name, value): norm_name = _normalized_headers[name] self._dict[norm_name] = value self._as_list[norm_name] = [value] def __getitem__(self, name): # type: (str) -> str return self._dict[_normalized_headers[name]] def __delitem__(self, name): norm_name = _normalized_headers[name] del self._dict[norm_name] del self._as_list[norm_name] def __len__(self): return len(self._dict) def __iter__(self): return iter(self._dict) def copy(self): # defined in dict but not in MutableMapping. return HTTPHeaders(self) # Use our overridden copy method for the copy.copy module. # This makes shallow copies one level deeper, but preserves # the appearance that HTTPHeaders is a single container. __copy__ = copy def __str__(self): lines = [] for name, value in self.get_all(): lines.append("%s: %s\n" % (name, value)) return "".join(lines) __unicode__ = __str__ class HTTPServerRequest(object): """A single HTTP request. All attributes are type `str` unless otherwise noted. .. attribute:: method HTTP request method, e.g. "GET" or "POST" .. attribute:: uri The requested uri. .. attribute:: path The path portion of `uri` .. attribute:: query The query portion of `uri` .. attribute:: version HTTP version specified in request, e.g. "HTTP/1.1" .. attribute:: headers `.HTTPHeaders` dictionary-like object for request headers. Acts like a case-insensitive dictionary with additional methods for repeated headers. .. attribute:: body Request body, if present, as a byte string. .. 
attribute:: remote_ip Client's IP address as a string. If ``HTTPServer.xheaders`` is set, will pass along the real IP address provided by a load balancer in the ``X-Real-Ip`` or ``X-Forwarded-For`` header. .. versionchanged:: 3.1 The list format of ``X-Forwarded-For`` is now supported. .. attribute:: protocol The protocol used, either "http" or "https". If ``HTTPServer.xheaders`` is set, will pass along the protocol used by a load balancer if reported via an ``X-Scheme`` header. .. attribute:: host The requested hostname, usually taken from the ``Host`` header. .. attribute:: arguments GET/POST arguments are available in the arguments property, which maps arguments names to lists of values (to support multiple values for individual names). Names are of type `str`, while arguments are byte strings. Note that this is different from `.RequestHandler.get_argument`, which returns argument values as unicode strings. .. attribute:: query_arguments Same format as ``arguments``, but contains only arguments extracted from the query string. .. versionadded:: 3.2 .. attribute:: body_arguments Same format as ``arguments``, but contains only arguments extracted from the request body. .. versionadded:: 3.2 .. attribute:: files File uploads are available in the files property, which maps file names to lists of `.HTTPFile`. .. attribute:: connection An HTTP request is attached to a single HTTP connection, which can be accessed through the "connection" attribute. Since connections are typically kept open in HTTP/1.1, multiple requests can be handled sequentially on a single connection. .. versionchanged:: 4.0 Moved from ``tornado.httpserver.HTTPRequest``. """ def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None, body=None, host=None, files=None, connection=None, start_line=None, server_connection=None): if start_line is not None: method, uri, version = start_line self.method = method self.uri = uri self.version = version self.headers = headers or HTTPHeaders() self.body = body or b"" # set remote IP and protocol context = getattr(connection, 'context', None) self.remote_ip = getattr(context, 'remote_ip', None) self.protocol = getattr(context, 'protocol', "http") self.host = host or self.headers.get("Host") or "127.0.0.1" self.host_name = split_host_and_port(self.host.lower())[0] self.files = files or {} self.connection = connection self.server_connection = server_connection self._start_time = time.time() self._finish_time = None self.path, sep, self.query = uri.partition('?') self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) self.query_arguments = copy.deepcopy(self.arguments) self.body_arguments = {} def supports_http_1_1(self): """Returns True if this request supports HTTP/1.1 semantics. .. deprecated:: 4.0 Applications are less likely to need this information with the introduction of `.HTTPConnection`. If you still need it, access the ``version`` attribute directly. """ return self.version == "HTTP/1.1" @property def cookies(self): """A dictionary of Cookie.Morsel objects.""" if not hasattr(self, "_cookies"): self._cookies = Cookie.SimpleCookie() if "Cookie" in self.headers: try: parsed = parse_cookie(self.headers["Cookie"]) except Exception: pass else: for k, v in parsed.items(): try: self._cookies[k] = v except Exception: # SimpleCookie imposes some restrictions on keys; # parse_cookie does not. Discard any cookies # with disallowed keys. pass return self._cookies def write(self, chunk, callback=None): """Writes the given chunk to the response stream. .. 
deprecated:: 4.0 Use ``request.connection`` and the `.HTTPConnection` methods to write the response. """ assert isinstance(chunk, bytes) assert self.version.startswith("HTTP/1."), \ "deprecated interface only supported in HTTP/1.x" self.connection.write(chunk, callback=callback) def finish(self): """Finishes this HTTP request on the open connection. .. deprecated:: 4.0 Use ``request.connection`` and the `.HTTPConnection` methods to write the response. """ self.connection.finish() self._finish_time = time.time() def full_url(self): """Reconstructs the full URL for this request.""" return self.protocol + "://" + self.host + self.uri def request_time(self): """Returns the amount of time it took for this request to execute.""" if self._finish_time is None: return time.time() - self._start_time else: return self._finish_time - self._start_time def get_ssl_certificate(self, binary_form=False): """Returns the client's SSL certificate, if any. To use client certificates, the HTTPServer's `ssl.SSLContext.verify_mode` field must be set, e.g.:: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain("foo.crt", "foo.key") ssl_ctx.load_verify_locations("cacerts.pem") ssl_ctx.verify_mode = ssl.CERT_REQUIRED server = HTTPServer(app, ssl_options=ssl_ctx) By default, the return value is a dictionary (or None, if no client certificate is present). If ``binary_form`` is true, a DER-encoded form of the certificate is returned instead. See SSLSocket.getpeercert() in the standard library for more details. http://docs.python.org/library/ssl.html#sslsocket-objects """ try: return self.connection.stream.socket.getpeercert( binary_form=binary_form) except SSLError: return None def _parse_body(self): parse_body_arguments( self.headers.get("Content-Type", ""), self.body, self.body_arguments, self.files, self.headers) for k, v in self.body_arguments.items(): self.arguments.setdefault(k, []).extend(v) def __repr__(self): attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) return "%s(%s, headers=%s)" % ( self.__class__.__name__, args, dict(self.headers)) class HTTPInputError(Exception): """Exception class for malformed HTTP requests or responses from remote sources. .. versionadded:: 4.0 """ pass class HTTPOutputError(Exception): """Exception class for errors in HTTP output. .. versionadded:: 4.0 """ pass class HTTPServerConnectionDelegate(object): """Implement this interface to handle requests from `.HTTPServer`. .. versionadded:: 4.0 """ def start_request(self, server_conn, request_conn): """This method is called by the server when a new request has started. :arg server_conn: is an opaque object representing the long-lived (e.g. tcp-level) connection. :arg request_conn: is a `.HTTPConnection` object for a single request/response exchange. This method should return a `.HTTPMessageDelegate`. """ raise NotImplementedError() def on_close(self, server_conn): """This method is called when a connection has been closed. :arg server_conn: is a server connection that has previously been passed to ``start_request``. """ pass class HTTPMessageDelegate(object): """Implement this interface to handle an HTTP request or response. .. versionadded:: 4.0 """ def headers_received(self, start_line, headers): """Called when the HTTP headers have been received and parsed. :arg start_line: a `.RequestStartLine` or `.ResponseStartLine` depending on whether this is a client or server message. 
:arg headers: a `.HTTPHeaders` instance. Some `.HTTPConnection` methods can only be called during ``headers_received``. May return a `.Future`; if it does the body will not be read until it is done. """ pass def data_received(self, chunk): """Called when a chunk of data has been received. May return a `.Future` for flow control. """ pass def finish(self): """Called after the last chunk of data has been received.""" pass def on_connection_close(self): """Called if the connection is closed without finishing the request. If ``headers_received`` is called, either ``finish`` or ``on_connection_close`` will be called, but not both. """ pass class HTTPConnection(object): """Applications use this interface to write their responses. .. versionadded:: 4.0 """ def write_headers(self, start_line, headers, chunk=None, callback=None): """Write an HTTP header block. :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`. :arg headers: a `.HTTPHeaders` instance. :arg chunk: the first (optional) chunk of data. This is an optimization so that small responses can be written in the same call as their headers. :arg callback: a callback to be run when the write is complete. The ``version`` field of ``start_line`` is ignored. Returns a `.Future` if no callback is given. """ raise NotImplementedError() def write(self, chunk, callback=None): """Writes a chunk of body data. The callback will be run when the write is complete. If no callback is given, returns a Future. """ raise NotImplementedError() def finish(self): """Indicates that the last body data has been written. """ raise NotImplementedError() def url_concat(url, args): """Concatenate url and arguments regardless of whether url has existing query parameters. ``args`` may be either a dictionary or a list of key-value pairs (the latter allows for multiple values with the same key. >>> url_concat("http://example.com/foo", dict(c="d")) 'http://example.com/foo?c=d' >>> url_concat("http://example.com/foo?a=b", dict(c="d")) 'http://example.com/foo?a=b&c=d' >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")]) 'http://example.com/foo?a=b&c=d&c=d2' """ if args is None: return url parsed_url = urlparse(url) if isinstance(args, dict): parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True) parsed_query.extend(args.items()) elif isinstance(args, list) or isinstance(args, tuple): parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True) parsed_query.extend(args) else: err = "'args' parameter should be dict, list or tuple. Not {0}".format( type(args)) raise TypeError(err) final_query = urlencode(parsed_query) url = urlunparse(( parsed_url[0], parsed_url[1], parsed_url[2], parsed_url[3], final_query, parsed_url[5])) return url class HTTPFile(ObjectDict): """Represents a file uploaded via a form. For backwards compatibility, its instance attributes are also accessible as dictionary keys. * ``filename`` * ``body`` * ``content_type`` """ pass def _parse_request_range(range_header): """Parses a Range header. Returns either ``None`` or tuple ``(start, end)``. Note that while the HTTP headers use inclusive byte positions, this method returns indexes suitable for use in slices. 
>>> start, end = _parse_request_range("bytes=1-2") >>> start, end (1, 3) >>> [0, 1, 2, 3, 4][start:end] [1, 2] >>> _parse_request_range("bytes=6-") (6, None) >>> _parse_request_range("bytes=-6") (-6, None) >>> _parse_request_range("bytes=-0") (None, 0) >>> _parse_request_range("bytes=") (None, None) >>> _parse_request_range("foo=42") >>> _parse_request_range("bytes=1-2,6-10") Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed). See [0] for the details of the range header. [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges """ unit, _, value = range_header.partition("=") unit, value = unit.strip(), value.strip() if unit != "bytes": return None start_b, _, end_b = value.partition("-") try: start = _int_or_none(start_b) end = _int_or_none(end_b) except ValueError: return None if end is not None: if start is None: if end != 0: start = -end end = None else: end += 1 return (start, end) def _get_content_range(start, end, total): """Returns a suitable Content-Range header: >>> print(_get_content_range(None, 1, 4)) bytes 0-0/4 >>> print(_get_content_range(1, 3, 4)) bytes 1-2/4 >>> print(_get_content_range(None, None, 4)) bytes 0-3/4 """ start = start or 0 end = (end or total) - 1 return "bytes %s-%s/%s" % (start, end, total) def _int_or_none(val): val = val.strip() if val == "": return None return int(val) def parse_body_arguments(content_type, body, arguments, files, headers=None): """Parses a form request body. Supports ``application/x-www-form-urlencoded`` and ``multipart/form-data``. The ``content_type`` parameter should be a string and ``body`` should be a byte string. The ``arguments`` and ``files`` parameters are dictionaries that will be updated with the parsed contents. """ if headers and 'Content-Encoding' in headers: gen_log.warning("Unsupported Content-Encoding: %s", headers['Content-Encoding']) return if content_type.startswith("application/x-www-form-urlencoded"): try: uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True) except Exception as e: gen_log.warning('Invalid x-www-form-urlencoded body: %s', e) uri_arguments = {} for name, values in uri_arguments.items(): if values: arguments.setdefault(name, []).extend(values) elif content_type.startswith("multipart/form-data"): try: fields = content_type.split(";") for field in fields: k, sep, v = field.strip().partition("=") if k == "boundary" and v: parse_multipart_form_data(utf8(v), body, arguments, files) break else: raise ValueError("multipart boundary not found") except Exception as e: gen_log.warning("Invalid multipart/form-data: %s", e) def parse_multipart_form_data(boundary, data, arguments, files): """Parses a ``multipart/form-data`` body. The ``boundary`` and ``data`` parameters are both byte strings. The dictionaries given in the arguments and files parameters will be updated with the contents of the body. """ # The standard allows for the boundary to be quoted in the header, # although it's rare (it happens at least for google app engine # xmpp). I think we're also supposed to handle backslash-escapes # here but I'll save that until we see a client that uses them # in the wild. 
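    # For example (illustrative): a header of
    #   Content-Type: multipart/form-data; boundary="xyzzy"
    # reaches this function with boundary == b'"xyzzy"', which the check
    # below unquotes to b'xyzzy'.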
if boundary.startswith(b'"') and boundary.endswith(b'"'): boundary = boundary[1:-1] final_boundary_index = data.rfind(b"--" + boundary + b"--") if final_boundary_index == -1: gen_log.warning("Invalid multipart/form-data: no final boundary") return parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n") for part in parts: if not part: continue eoh = part.find(b"\r\n\r\n") if eoh == -1: gen_log.warning("multipart/form-data missing headers") continue headers = HTTPHeaders.parse(part[:eoh].decode("utf-8")) disp_header = headers.get("Content-Disposition", "") disposition, disp_params = _parse_header(disp_header) if disposition != "form-data" or not part.endswith(b"\r\n"): gen_log.warning("Invalid multipart/form-data") continue value = part[eoh + 4:-2] if not disp_params.get("name"): gen_log.warning("multipart/form-data value missing name") continue name = disp_params["name"] if disp_params.get("filename"): ctype = headers.get("Content-Type", "application/unknown") files.setdefault(name, []).append(HTTPFile( # type: ignore filename=disp_params["filename"], body=value, content_type=ctype)) else: arguments.setdefault(name, []).append(value) def format_timestamp(ts): """Formats a timestamp in the format used by HTTP. The argument may be a numeric timestamp as returned by `time.time`, a time tuple as returned by `time.gmtime`, or a `datetime.datetime` object. >>> format_timestamp(1359312200) 'Sun, 27 Jan 2013 18:43:20 GMT' """ if isinstance(ts, numbers.Real): pass elif isinstance(ts, (tuple, time.struct_time)): ts = calendar.timegm(ts) elif isinstance(ts, datetime.datetime): ts = calendar.timegm(ts.utctimetuple()) else: raise TypeError("unknown timestamp type: %r" % ts) return email.utils.formatdate(ts, usegmt=True) RequestStartLine = collections.namedtuple( 'RequestStartLine', ['method', 'path', 'version']) def parse_request_start_line(line): """Returns a (method, path, version) tuple for an HTTP 1.x request line. The response is a `collections.namedtuple`. >>> parse_request_start_line("GET /foo HTTP/1.1") RequestStartLine(method='GET', path='/foo', version='HTTP/1.1') """ try: method, path, version = line.split(" ") except ValueError: raise HTTPInputError("Malformed HTTP request line") if not re.match(r"^HTTP/1\.[0-9]$", version): raise HTTPInputError( "Malformed HTTP version in HTTP Request-Line: %r" % version) return RequestStartLine(method, path, version) ResponseStartLine = collections.namedtuple( 'ResponseStartLine', ['version', 'code', 'reason']) def parse_response_start_line(line): """Returns a (version, code, reason) tuple for an HTTP 1.x response line. The response is a `collections.namedtuple`. >>> parse_response_start_line("HTTP/1.1 200 OK") ResponseStartLine(version='HTTP/1.1', code=200, reason='OK') """ line = native_str(line) match = re.match("(HTTP/1.[0-9]) ([0-9]+) ([^\r]*)", line) if not match: raise HTTPInputError("Error parsing response start line") return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3)) # _parseparam and _parse_header are copied and modified from python2.7's cgi.py # The original 2.7 version of this code did not correctly support some # combinations of semicolons and double quotes. # It has also been modified to support valueless parameters as seen in # websocket extension negotiations. 
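# For example, a sketch of the expected behavior (not an original doctest):
#   _parse_header('form-data; name="files"; filename="fo\\"o.html"')
# returns ('form-data', {'name': 'files', 'filename': 'fo"o.html'}).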
def _parseparam(s): while s[:1] == ';': s = s[1:] end = s.find(';') while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: end = s.find(';', end + 1) if end < 0: end = len(s) f = s[:end] yield f.strip() s = s[end:] def _parse_header(line): """Parse a Content-type like header. Return the main content-type and a dictionary of options. """ parts = _parseparam(';' + line) key = next(parts) pdict = {} for p in parts: i = p.find('=') if i >= 0: name = p[:i].strip().lower() value = p[i + 1:].strip() if len(value) >= 2 and value[0] == value[-1] == '"': value = value[1:-1] value = value.replace('\\\\', '\\').replace('\\"', '"') pdict[name] = value else: pdict[p] = None return key, pdict def _encode_header(key, pdict): """Inverse of _parse_header. >>> _encode_header('permessage-deflate', ... {'client_max_window_bits': 15, 'client_no_context_takeover': None}) 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover' """ if not pdict: return key out = [key] # Sort the parameters just to make it easy to test. for k, v in sorted(pdict.items()): if v is None: out.append(k) else: # TODO: quote if necessary. out.append('%s=%s' % (k, v)) return '; '.join(out) def doctests(): import doctest return doctest.DocTestSuite() def split_host_and_port(netloc): """Returns ``(host, port)`` tuple from ``netloc``. Returned ``port`` will be ``None`` if not present. .. versionadded:: 4.1 """ match = re.match(r'^(.+):(\d+)$', netloc) if match: host = match.group(1) port = int(match.group(2)) else: host = netloc port = None return (host, port) _OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") _QuotePatt = re.compile(r"[\\].") _nulljoin = ''.join def _unquote_cookie(str): """Handle double quotes and escaping in cookie values. This method is copied verbatim from the Python 3.5 standard library (http.cookies._unquote) so we don't have to depend on non-public interfaces. """ # If there aren't any doublequotes, # then there can't be any special characters. See RFC 2109. if str is None or len(str) < 2: return str if str[0] != '"' or str[-1] != '"': return str # We have to assume that we must decode this string. # Down to work. # Remove the "s str = str[1:-1] # Check for special sequences. Examples: # \012 --> \n # \" --> " # i = 0 n = len(str) res = [] while 0 <= i < n: o_match = _OctalPatt.search(str, i) q_match = _QuotePatt.search(str, i) if not o_match and not q_match: # Neither matched res.append(str[i:]) break # else: j = k = -1 if o_match: j = o_match.start(0) if q_match: k = q_match.start(0) if q_match and (not o_match or k < j): # QuotePatt matched res.append(str[i:k]) res.append(str[k + 1]) i = k + 2 else: # OctalPatt matched res.append(str[i:j]) res.append(chr(int(str[j + 1:j + 4], 8))) i = j + 4 return _nulljoin(res) def parse_cookie(cookie): """Parse a ``Cookie`` HTTP header into a dict of name/value pairs. This function attempts to mimic browser cookie parsing behavior; it specifically does not follow any of the cookie-related RFCs (because browsers don't either). The algorithm used is identical to that used by Django version 1.9.10. .. versionadded:: 4.4.2 """ cookiedict = {} for chunk in cookie.split(str(';')): if str('=') in chunk: key, val = chunk.split(str('='), 1) else: # Assume an empty name per # https://bugzilla.mozilla.org/show_bug.cgi?id=169091 key, val = str(''), chunk key, val = key.strip(), val.strip() if key or val: # unquote using Python's algorithm. 
cookiedict[key] = _unquote_cookie(val) return cookiedict tornado-4.5.3/tornado/ioloop.py000066400000000000000000001166641322420601000165160ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """An I/O event loop for non-blocking sockets. Typical applications will use a single `IOLoop` object, in the `IOLoop.instance` singleton. The `IOLoop.start` method should usually be called at the end of the ``main()`` function. Atypical applications may use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` case. In addition to I/O events, the `IOLoop` can also schedule time-based events. `IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`. """ from __future__ import absolute_import, division, print_function import collections import datetime import errno import functools import heapq import itertools import logging import numbers import os import select import sys import threading import time import traceback import math from tornado.concurrent import TracebackFuture, is_future from tornado.log import app_log, gen_log from tornado.platform.auto import set_close_exec, Waker from tornado import stack_context from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds try: import signal except ImportError: signal = None if PY3: import _thread as thread else: import thread _POLL_TIMEOUT = 3600.0 class TimeoutError(Exception): pass class IOLoop(Configurable): """A level-triggered I/O loop. We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they are available, or else we fall back on select(). If you are implementing a system that needs to handle thousands of simultaneous connections, you should use a system that supports either ``epoll`` or ``kqueue``. Example usage for a simple TCP server: .. testcode:: import errno import functools import tornado.ioloop import socket def connection_ready(sock, fd, events): while True: try: connection, address = sock.accept() except socket.error as e: if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): raise return connection.setblocking(0) handle_connection(connection, address) if __name__ == '__main__': sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) sock.bind(("", port)) sock.listen(128) io_loop = tornado.ioloop.IOLoop.current() callback = functools.partial(connection_ready, sock) io_loop.add_handler(sock.fileno(), callback, io_loop.READ) io_loop.start() .. testoutput:: :hide: By default, a newly-constructed `IOLoop` becomes the thread's current `IOLoop`, unless there already is a current `IOLoop`. This behavior can be controlled with the ``make_current`` argument to the `IOLoop` constructor: if ``make_current=True``, the new `IOLoop` will always try to become current and it raises an error if there is already a current instance. If ``make_current=False``, the new `IOLoop` will not try to become current. .. 
versionchanged:: 4.2 Added the ``make_current`` keyword argument to the `IOLoop` constructor. """ # Constants from the epoll module _EPOLLIN = 0x001 _EPOLLPRI = 0x002 _EPOLLOUT = 0x004 _EPOLLERR = 0x008 _EPOLLHUP = 0x010 _EPOLLRDHUP = 0x2000 _EPOLLONESHOT = (1 << 30) _EPOLLET = (1 << 31) # Our events map exactly to the epoll events NONE = 0 READ = _EPOLLIN WRITE = _EPOLLOUT ERROR = _EPOLLERR | _EPOLLHUP # Global lock for creating global IOLoop instance _instance_lock = threading.Lock() _current = threading.local() @staticmethod def instance(): """Returns a global `IOLoop` instance. Most applications have a single, global `IOLoop` running on the main thread. Use this method to get this instance from another thread. In most other cases, it is better to use `current()` to get the current thread's `IOLoop`. """ if not hasattr(IOLoop, "_instance"): with IOLoop._instance_lock: if not hasattr(IOLoop, "_instance"): # New instance after double check IOLoop._instance = IOLoop() return IOLoop._instance @staticmethod def initialized(): """Returns true if the singleton instance has been created.""" return hasattr(IOLoop, "_instance") def install(self): """Installs this `IOLoop` object as the singleton instance. This is normally not necessary as `instance()` will create an `IOLoop` on demand, but you may want to call `install` to use a custom subclass of `IOLoop`. When using an `IOLoop` subclass, `install` must be called prior to creating any objects that implicitly create their own `IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`). """ assert not IOLoop.initialized() IOLoop._instance = self @staticmethod def clear_instance(): """Clear the global `IOLoop` instance. .. versionadded:: 4.0 """ if hasattr(IOLoop, "_instance"): del IOLoop._instance @staticmethod def current(instance=True): """Returns the current thread's `IOLoop`. If an `IOLoop` is currently running or has been marked as current by `make_current`, returns that instance. If there is no current `IOLoop`, returns `IOLoop.instance()` (i.e. the main thread's `IOLoop`, creating one if necessary) if ``instance`` is true. In general you should use `IOLoop.current` as the default when constructing an asynchronous object, and use `IOLoop.instance` when you mean to communicate to the main thread from a different one. .. versionchanged:: 4.1 Added ``instance`` argument to control the fallback to `IOLoop.instance()`. """ current = getattr(IOLoop._current, "instance", None) if current is None and instance: return IOLoop.instance() return current def make_current(self): """Makes this the `IOLoop` for the current thread. An `IOLoop` automatically becomes current for its thread when it is started, but it is sometimes useful to call `make_current` explicitly before starting the `IOLoop`, so that code run at startup time can find the right instance. .. versionchanged:: 4.1 An `IOLoop` created while there is no current `IOLoop` will automatically become current. 
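        A minimal sketch (assuming a fresh thread with no current
        `IOLoop`)::

            loop = IOLoop()      # implicitly becomes current on creation
            loop.make_current()  # the explicit form, useful before start()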
""" IOLoop._current.instance = self @staticmethod def clear_current(): IOLoop._current.instance = None @classmethod def configurable_base(cls): return IOLoop @classmethod def configurable_default(cls): if hasattr(select, "epoll"): from tornado.platform.epoll import EPollIOLoop return EPollIOLoop if hasattr(select, "kqueue"): # Python 2.6+ on BSD or Mac from tornado.platform.kqueue import KQueueIOLoop return KQueueIOLoop from tornado.platform.select import SelectIOLoop return SelectIOLoop def initialize(self, make_current=None): if make_current is None: if IOLoop.current(instance=False) is None: self.make_current() elif make_current: if IOLoop.current(instance=False) is not None: raise RuntimeError("current IOLoop already exists") self.make_current() def close(self, all_fds=False): """Closes the `IOLoop`, freeing any resources used. If ``all_fds`` is true, all file descriptors registered on the IOLoop will be closed (not just the ones created by the `IOLoop` itself). Many applications will only use a single `IOLoop` that runs for the entire lifetime of the process. In that case closing the `IOLoop` is not necessary since everything will be cleaned up when the process exits. `IOLoop.close` is provided mainly for scenarios such as unit tests, which create and destroy a large number of ``IOLoops``. An `IOLoop` must be completely stopped before it can be closed. This means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must be allowed to return before attempting to call `IOLoop.close()`. Therefore the call to `close` will usually appear just after the call to `start` rather than near the call to `stop`. .. versionchanged:: 3.1 If the `IOLoop` implementation supports non-integer objects for "file descriptors", those objects will have their ``close`` method when ``all_fds`` is true. """ raise NotImplementedError() def add_handler(self, fd, handler, events): """Registers the given handler to receive the given events for ``fd``. The ``fd`` argument may either be an integer file descriptor or a file-like object with a ``fileno()`` method (and optionally a ``close()`` method, which may be called when the `IOLoop` is shut down). The ``events`` argument is a bitwise or of the constants ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. When an event occurs, ``handler(fd, events)`` will be run. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def update_handler(self, fd, events): """Changes the events we listen for ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def remove_handler(self, fd): """Stop listening for events on ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def set_blocking_signal_threshold(self, seconds, action): """Sends a signal if the `IOLoop` is blocked for more than ``s`` seconds. Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy platform. The action parameter is a Python signal handler. Read the documentation for the `signal` module for more information. If ``action`` is None, the process will be killed if it is blocked for too long. """ raise NotImplementedError() def set_blocking_log_threshold(self, seconds): """Logs a stack trace if the `IOLoop` is blocked for more than ``s`` seconds. 
Equivalent to ``set_blocking_signal_threshold(seconds, self.log_stack)`` """ self.set_blocking_signal_threshold(seconds, self.log_stack) def log_stack(self, signal, frame): """Signal handler to log the stack trace of the current thread. For use with `set_blocking_signal_threshold`. """ gen_log.warning('IOLoop blocked for %f seconds in\n%s', self._blocking_signal_threshold, ''.join(traceback.format_stack(frame))) def start(self): """Starts the I/O loop. The loop will run until one of the callbacks calls `stop()`, which will make the loop stop after the current event iteration completes. """ raise NotImplementedError() def _setup_logging(self): """The IOLoop catches and logs exceptions, so it's important that log output be visible. However, python's default behavior for non-root loggers (prior to python 3.2) is to print an unhelpful "no handlers could be found" message rather than the actual log entry, so we must explicitly configure logging if we've made it this far without anything. This method should be called from start() in subclasses. """ if not any([logging.getLogger().handlers, logging.getLogger('tornado').handlers, logging.getLogger('tornado.application').handlers]): logging.basicConfig() def stop(self): """Stop the I/O loop. If the event loop is not currently running, the next call to `start()` will return immediately. To use asynchronous methods from otherwise-synchronous code (such as unit tests), you can start and stop the event loop like this:: ioloop = IOLoop() async_method(ioloop=ioloop, callback=ioloop.stop) ioloop.start() ``ioloop.start()`` will return after ``async_method`` has run its callback, whether that callback was invoked before or after ``ioloop.start``. Note that even after `stop` has been called, the `IOLoop` is not completely stopped until `IOLoop.start` has also returned. Some work that was scheduled before the call to `stop` may still be run before the `IOLoop` shuts down. """ raise NotImplementedError() def run_sync(self, func, timeout=None): """Starts the `IOLoop`, runs the given function, and stops the loop. The function must return either a yieldable object or ``None``. If the function returns a yieldable object, the `IOLoop` will run until the yieldable is resolved (and `run_sync()` will return the yieldable's result). If it raises an exception, the `IOLoop` will stop and the exception will be re-raised to the caller. The keyword-only argument ``timeout`` may be used to set a maximum duration for the function. If the timeout expires, a `TimeoutError` is raised. This method is useful in conjunction with `tornado.gen.coroutine` to allow asynchronous calls in a ``main()`` function:: @gen.coroutine def main(): # do stuff... if __name__ == '__main__': IOLoop.current().run_sync(main) .. versionchanged:: 4.3 Returning a non-``None``, non-yieldable value is now an error. 
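        The ``timeout`` argument bounds the whole call; a sketch (the
        60-second limit is illustrative)::

            IOLoop.current().run_sync(main, timeout=60)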
""" future_cell = [None] def run(): try: result = func() if result is not None: from tornado.gen import convert_yielded result = convert_yielded(result) except Exception: future_cell[0] = TracebackFuture() future_cell[0].set_exc_info(sys.exc_info()) else: if is_future(result): future_cell[0] = result else: future_cell[0] = TracebackFuture() future_cell[0].set_result(result) self.add_future(future_cell[0], lambda future: self.stop()) self.add_callback(run) if timeout is not None: timeout_handle = self.add_timeout(self.time() + timeout, self.stop) self.start() if timeout is not None: self.remove_timeout(timeout_handle) if not future_cell[0].done(): raise TimeoutError('Operation timed out after %s seconds' % timeout) return future_cell[0].result() def time(self): """Returns the current time according to the `IOLoop`'s clock. The return value is a floating-point number relative to an unspecified time in the past. By default, the `IOLoop`'s time function is `time.time`. However, it may be configured to use e.g. `time.monotonic` instead. Calls to `add_timeout` that pass a number instead of a `datetime.timedelta` should use this function to compute the appropriate time, so they can work no matter what time function is chosen. """ return time.time() def add_timeout(self, deadline, callback, *args, **kwargs): """Runs the ``callback`` at the time ``deadline`` from the I/O loop. Returns an opaque handle that may be passed to `remove_timeout` to cancel. ``deadline`` may be a number denoting a time (on the same scale as `IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. Since Tornado 4.0, `call_later` is a more convenient alternative for the relative case since it does not require a timedelta object. Note that it is not safe to call `add_timeout` from other threads. Instead, you must use `add_callback` to transfer control to the `IOLoop`'s thread, and then call `add_timeout` from there. Subclasses of IOLoop must implement either `add_timeout` or `call_at`; the default implementations of each will call the other. `call_at` is usually easier to implement, but subclasses that wish to maintain compatibility with Tornado versions prior to 4.0 must use `add_timeout` instead. .. versionchanged:: 4.0 Now passes through ``*args`` and ``**kwargs`` to the callback. """ if isinstance(deadline, numbers.Real): return self.call_at(deadline, callback, *args, **kwargs) elif isinstance(deadline, datetime.timedelta): return self.call_at(self.time() + timedelta_to_seconds(deadline), callback, *args, **kwargs) else: raise TypeError("Unsupported deadline %r" % deadline) def call_later(self, delay, callback, *args, **kwargs): """Runs the ``callback`` after ``delay`` seconds have passed. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.call_at(self.time() + delay, callback, *args, **kwargs) def call_at(self, when, callback, *args, **kwargs): """Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. 
See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.add_timeout(when, callback, *args, **kwargs) def remove_timeout(self, timeout): """Cancels a pending timeout. The argument is a handle as returned by `add_timeout`. It is safe to call `remove_timeout` even if the callback has already been run. """ raise NotImplementedError() def add_callback(self, callback, *args, **kwargs): """Calls the given callback on the next I/O loop iteration. It is safe to call this method from any thread at any time, except from a signal handler. Note that this is the **only** method in `IOLoop` that makes this thread-safety guarantee; all other interaction with the `IOLoop` must be done from that `IOLoop`'s thread. `add_callback()` may be used to transfer control from other threads to the `IOLoop`'s thread. To add a callback from a signal handler, see `add_callback_from_signal`. """ raise NotImplementedError() def add_callback_from_signal(self, callback, *args, **kwargs): """Calls the given callback on the next I/O loop iteration. Safe for use from a Python signal handler; should not be used otherwise. Callbacks added with this method will be run without any `.stack_context`, to avoid picking up the context of the function that was interrupted by the signal. """ raise NotImplementedError() def spawn_callback(self, callback, *args, **kwargs): """Calls the given callback on the next IOLoop iteration. Unlike all other callback-related methods on IOLoop, ``spawn_callback`` does not associate the callback with its caller's ``stack_context``, so it is suitable for fire-and-forget callbacks that should not interfere with the caller. .. versionadded:: 4.0 """ with stack_context.NullContext(): self.add_callback(callback, *args, **kwargs) def add_future(self, future, callback): """Schedules a callback on the ``IOLoop`` when the given `.Future` is finished. The callback is invoked with one argument, the `.Future`. """ assert is_future(future) callback = stack_context.wrap(callback) future.add_done_callback( lambda future: self.add_callback(callback, future)) def _run_callback(self, callback): """Runs a callback with error handling. For use in subclasses. """ try: ret = callback() if ret is not None: from tornado import gen # Functions that return Futures typically swallow all # exceptions and store them in the Future. If a Future # makes it out to the IOLoop, ensure its exception (if any) # gets logged too. try: ret = gen.convert_yielded(ret) except gen.BadYieldError: # It's not unusual for add_callback to be used with # methods returning a non-None and non-yieldable # result, which should just be ignored. pass else: self.add_future(ret, self._discard_future_result) except Exception: self.handle_callback_exception(callback) def _discard_future_result(self, future): """Avoid unhandled-exception warnings from spawned coroutines.""" future.result() def handle_callback_exception(self, callback): """This method is called whenever a callback run by the `IOLoop` throws an exception. By default simply logs the exception as an error. Subclasses may override this method to customize reporting of exceptions. The exception itself is not passed explicitly, but is available in `sys.exc_info`. """ app_log.error("Exception in callback %r", callback, exc_info=True) def split_fd(self, fd): """Returns an (fd, obj) pair from an ``fd`` parameter. We accept both raw file descriptors and file-like objects as input to `add_handler` and related methods. 
When a file-like object is passed, we must retain the object itself so we can close it correctly when the `IOLoop` shuts down, but the poller interfaces favor file descriptors (they will accept file-like objects and call ``fileno()`` for you, but they always return the descriptor itself). This method is provided for use by `IOLoop` subclasses and should not generally be used by application code. .. versionadded:: 4.0 """ try: return fd.fileno(), fd except AttributeError: return fd, fd def close_fd(self, fd): """Utility method to close an ``fd``. If ``fd`` is a file-like object, we close it directly; otherwise we use `os.close`. This method is provided for use by `IOLoop` subclasses (in implementations of ``IOLoop.close(all_fds=True)`` and should not generally be used by application code. .. versionadded:: 4.0 """ try: try: fd.close() except AttributeError: os.close(fd) except OSError: pass class PollIOLoop(IOLoop): """Base class for IOLoops built around a select-like function. For concrete implementations, see `tornado.platform.epoll.EPollIOLoop` (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or `tornado.platform.select.SelectIOLoop` (all platforms). """ def initialize(self, impl, time_func=None, **kwargs): super(PollIOLoop, self).initialize(**kwargs) self._impl = impl if hasattr(self._impl, 'fileno'): set_close_exec(self._impl.fileno()) self.time_func = time_func or time.time self._handlers = {} self._events = {} self._callbacks = collections.deque() self._timeouts = [] self._cancellations = 0 self._running = False self._stopped = False self._closing = False self._thread_ident = None self._blocking_signal_threshold = None self._timeout_counter = itertools.count() # Create a pipe that we send bogus data to when we want to wake # the I/O loop when it is idle self._waker = Waker() self.add_handler(self._waker.fileno(), lambda fd, events: self._waker.consume(), self.READ) def close(self, all_fds=False): self._closing = True self.remove_handler(self._waker.fileno()) if all_fds: for fd, handler in list(self._handlers.values()): self.close_fd(fd) self._waker.close() self._impl.close() self._callbacks = None self._timeouts = None def add_handler(self, fd, handler, events): fd, obj = self.split_fd(fd) self._handlers[fd] = (obj, stack_context.wrap(handler)) self._impl.register(fd, events | self.ERROR) def update_handler(self, fd, events): fd, obj = self.split_fd(fd) self._impl.modify(fd, events | self.ERROR) def remove_handler(self, fd): fd, obj = self.split_fd(fd) self._handlers.pop(fd, None) self._events.pop(fd, None) try: self._impl.unregister(fd) except Exception: gen_log.debug("Error deleting fd from IOLoop", exc_info=True) def set_blocking_signal_threshold(self, seconds, action): if not hasattr(signal, "setitimer"): gen_log.error("set_blocking_signal_threshold requires a signal module " "with the setitimer method") return self._blocking_signal_threshold = seconds if seconds is not None: signal.signal(signal.SIGALRM, action if action is not None else signal.SIG_DFL) def start(self): if self._running: raise RuntimeError("IOLoop is already running") self._setup_logging() if self._stopped: self._stopped = False return old_current = getattr(IOLoop._current, "instance", None) IOLoop._current.instance = self self._thread_ident = thread.get_ident() self._running = True # signal.set_wakeup_fd closes a race condition in event loops: # a signal may arrive at the beginning of select/poll/etc # before it goes into its interruptible sleep, so the signal # will be consumed without waking the 
select. The solution is # for the (C, synchronous) signal handler to write to a pipe, # which will then be seen by select. # # In python's signal handling semantics, this only matters on the # main thread (fortunately, set_wakeup_fd only works on the main # thread and will raise a ValueError otherwise). # # If someone has already set a wakeup fd, we don't want to # disturb it. This is an issue for twisted, which does its # SIGCHLD processing in response to its own wakeup fd being # written to. As long as the wakeup fd is registered on the IOLoop, # the loop will still wake up and everything should work. old_wakeup_fd = None if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': # requires python 2.6+, unix. set_wakeup_fd exists but crashes # the python process on windows. try: old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno()) if old_wakeup_fd != -1: # Already set, restore previous value. This is a little racy, # but there's no clean get_wakeup_fd and in real use the # IOLoop is just started once at the beginning. signal.set_wakeup_fd(old_wakeup_fd) old_wakeup_fd = None except ValueError: # Non-main thread, or the previous value of wakeup_fd # is no longer valid. old_wakeup_fd = None try: while True: # Prevent IO event starvation by delaying new callbacks # to the next iteration of the event loop. ncallbacks = len(self._callbacks) # Add any timeouts that have come due to the callback list. # Do not run anything until we have determined which ones # are ready, so timeouts that call add_timeout cannot # schedule anything in this iteration. due_timeouts = [] if self._timeouts: now = self.time() while self._timeouts: if self._timeouts[0].callback is None: # The timeout was cancelled. Note that the # cancellation check is repeated below for timeouts # that are cancelled by another timeout or callback. heapq.heappop(self._timeouts) self._cancellations -= 1 elif self._timeouts[0].deadline <= now: due_timeouts.append(heapq.heappop(self._timeouts)) else: break if (self._cancellations > 512 and self._cancellations > (len(self._timeouts) >> 1)): # Clean up the timeout queue when it gets large and it's # more than half cancellations. self._cancellations = 0 self._timeouts = [x for x in self._timeouts if x.callback is not None] heapq.heapify(self._timeouts) for i in range(ncallbacks): self._run_callback(self._callbacks.popleft()) for timeout in due_timeouts: if timeout.callback is not None: self._run_callback(timeout.callback) # Closures may be holding on to a lot of memory, so allow # them to be freed before we go into our poll wait. due_timeouts = timeout = None if self._callbacks: # If any callbacks or timeouts called add_callback, # we don't want to wait in poll() before we run them. poll_timeout = 0.0 elif self._timeouts: # If there are any timeouts, schedule the first one. # Use self.time() instead of 'now' to account for time # spent running callbacks. poll_timeout = self._timeouts[0].deadline - self.time() poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT)) else: # No timeouts and no callbacks, so use the default. poll_timeout = _POLL_TIMEOUT if not self._running: break if self._blocking_signal_threshold is not None: # clear alarm so it doesn't fire while poll is waiting for # events. 
signal.setitimer(signal.ITIMER_REAL, 0, 0) try: event_pairs = self._impl.poll(poll_timeout) except Exception as e: # Depending on python version and IOLoop implementation, # different exception types may be thrown and there are # two ways EINTR might be signaled: # * e.errno == errno.EINTR # * e.args is like (errno.EINTR, 'Interrupted system call') if errno_from_exception(e) == errno.EINTR: continue else: raise if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, self._blocking_signal_threshold, 0) # Pop one fd at a time from the set of pending fds and run # its handler. Since that handler may perform actions on # other file descriptors, there may be reentrant calls to # this IOLoop that modify self._events self._events.update(event_pairs) while self._events: fd, events = self._events.popitem() try: fd_obj, handler_func = self._handlers[fd] handler_func(fd_obj, events) except (OSError, IOError) as e: if errno_from_exception(e) == errno.EPIPE: # Happens when the client closes the connection pass else: self.handle_callback_exception(self._handlers.get(fd)) except Exception: self.handle_callback_exception(self._handlers.get(fd)) fd_obj = handler_func = None finally: # reset the stopped flag so another start/stop pair can be issued self._stopped = False if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, 0, 0) IOLoop._current.instance = old_current if old_wakeup_fd is not None: signal.set_wakeup_fd(old_wakeup_fd) def stop(self): self._running = False self._stopped = True self._waker.wake() def time(self): return self.time_func() def call_at(self, deadline, callback, *args, **kwargs): timeout = _Timeout( deadline, functools.partial(stack_context.wrap(callback), *args, **kwargs), self) heapq.heappush(self._timeouts, timeout) return timeout def remove_timeout(self, timeout): # Removing from a heap is complicated, so just leave the defunct # timeout object in the queue (see discussion in # http://docs.python.org/library/heapq.html). # If this turns out to be a problem, we could add a garbage # collection pass whenever there are too many dead timeouts. timeout.callback = None self._cancellations += 1 def add_callback(self, callback, *args, **kwargs): if self._closing: return # Blindly insert into self._callbacks. This is safe even # from signal handlers because deque.append is atomic. self._callbacks.append(functools.partial( stack_context.wrap(callback), *args, **kwargs)) if thread.get_ident() != self._thread_ident: # This will write one byte but Waker.consume() reads many # at once, so it's ok to write even when not strictly # necessary. self._waker.wake() else: # If we're on the IOLoop's thread, we don't need to wake anyone. pass def add_callback_from_signal(self, callback, *args, **kwargs): with stack_context.NullContext(): self.add_callback(callback, *args, **kwargs) class _Timeout(object): """An IOLoop timeout, a UNIX timestamp and a callback""" # Reduce memory overhead when there are lots of pending callbacks __slots__ = ['deadline', 'callback', 'tdeadline'] def __init__(self, deadline, callback, io_loop): if not isinstance(deadline, numbers.Real): raise TypeError("Unsupported deadline %r" % deadline) self.deadline = deadline self.callback = callback self.tdeadline = (deadline, next(io_loop._timeout_counter)) # Comparison methods to sort by deadline, with object id as a tiebreaker # to guarantee a consistent ordering. 
The heapq module uses __le__ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons # use __lt__). def __lt__(self, other): return self.tdeadline < other.tdeadline def __le__(self, other): return self.tdeadline <= other.tdeadline class PeriodicCallback(object): """Schedules the given callback to be called periodically. The callback is called every ``callback_time`` milliseconds. Note that the timeout is given in milliseconds, while most other time-related functions in Tornado use seconds. If the callback runs for longer than ``callback_time`` milliseconds, subsequent invocations will be skipped to get back on schedule. `start` must be called after the `PeriodicCallback` is created. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ def __init__(self, callback, callback_time, io_loop=None): self.callback = callback if callback_time <= 0: raise ValueError("Periodic callback must have a positive callback_time") self.callback_time = callback_time self.io_loop = io_loop or IOLoop.current() self._running = False self._timeout = None def start(self): """Starts the timer.""" self._running = True self._next_timeout = self.io_loop.time() self._schedule_next() def stop(self): """Stops the timer.""" self._running = False if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None def is_running(self): """Return True if this `.PeriodicCallback` has been started. .. versionadded:: 4.1 """ return self._running def _run(self): if not self._running: return try: return self.callback() except Exception: self.io_loop.handle_callback_exception(self.callback) finally: self._schedule_next() def _schedule_next(self): if self._running: current_time = self.io_loop.time() if self._next_timeout <= current_time: callback_time_sec = self.callback_time / 1000.0 self._next_timeout += (math.floor((current_time - self._next_timeout) / callback_time_sec) + 1) * callback_time_sec self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) tornado-4.5.3/tornado/iostream.py000066400000000000000000002005341322420601000170260ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility classes to write to and read from non-blocking files and sockets. Contents: * `BaseIOStream`: Generic interface for reading and writing. * `IOStream`: Implementation of BaseIOStream using non-blocking sockets. * `SSLIOStream`: SSL-aware version of IOStream. * `PipeIOStream`: Pipe-based IOStream implementation. 
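A coroutine-style sketch of how these pieces fit together (this example is
editorial, not part of the original docs; it assumes ``example.com`` is
reachable from the machine running it)::

    import socket

    from tornado import gen, ioloop, iostream

    @gen.coroutine
    def fetch():
        stream = iostream.IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        yield stream.connect(("example.com", 80))
        yield stream.write(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        body = yield stream.read_until_close()
        raise gen.Return(body)

    print(ioloop.IOLoop.current().run_sync(fetch))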
""" from __future__ import absolute_import, division, print_function import collections import errno import numbers import os import socket import sys import re from tornado.concurrent import TracebackFuture from tornado import ioloop from tornado.log import gen_log, app_log from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults from tornado import stack_context from tornado.util import errno_from_exception try: from tornado.platform.posix import _set_nonblocking except ImportError: _set_nonblocking = None try: import ssl except ImportError: # ssl is not available on Google App Engine ssl = None # These errnos indicate that a non-blocking operation must be retried # at a later time. On most platforms they're the same value, but on # some they differ. _ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) if hasattr(errno, "WSAEWOULDBLOCK"): _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore # These errnos indicate that a connection has been abruptly terminated. # They should be caught and handled less noisily than other errors. _ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE, errno.ETIMEDOUT) if hasattr(errno, "WSAECONNRESET"): _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore if sys.platform == 'darwin': # OSX appears to have a race condition that causes send(2) to return # EPROTOTYPE if called while a socket is being torn down: # http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ # Since the socket is being closed anyway, treat this as an ECONNRESET # instead of an unexpected error. _ERRNO_CONNRESET += (errno.EPROTOTYPE,) # type: ignore # More non-portable errnos: _ERRNO_INPROGRESS = (errno.EINPROGRESS,) if hasattr(errno, "WSAEINPROGRESS"): _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) # type: ignore _WINDOWS = sys.platform.startswith('win') class StreamClosedError(IOError): """Exception raised by `IOStream` methods when the stream is closed. Note that the close callback is scheduled to run *after* other callbacks on the stream (to allow for buffered data to be processed), so you may see this error before you see the close callback. The ``real_error`` attribute contains the underlying error that caused the stream to close (if any). .. versionchanged:: 4.3 Added the ``real_error`` attribute. """ def __init__(self, real_error=None): super(StreamClosedError, self).__init__('Stream is closed') self.real_error = real_error class UnsatisfiableReadError(Exception): """Exception raised when a read cannot be satisfied. Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes`` argument. """ pass class StreamBufferFullError(Exception): """Exception raised by `IOStream` methods when the buffer is full. """ class BaseIOStream(object): """A utility class to write to and read from a non-blocking file or socket. We support a non-blocking ``write()`` and a family of ``read_*()`` methods. All of the methods take an optional ``callback`` argument and return a `.Future` only if no callback is given. When the operation completes, the callback will be run or the `.Future` will resolve with the data read (or ``None`` for ``write()``). All outstanding ``Futures`` will resolve with a `StreamClosedError` when the stream is closed; users of the callback interface will be notified via `.BaseIOStream.set_close_callback` instead. 
When a stream is closed due to an error, the IOStream's ``error`` attribute contains the exception object. Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, `read_from_fd`, and optionally `get_fd_error`. """ def __init__(self, io_loop=None, max_buffer_size=None, read_chunk_size=None, max_write_buffer_size=None): """`BaseIOStream` constructor. :arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`. Deprecated since Tornado 4.1. :arg max_buffer_size: Maximum amount of incoming data to buffer; defaults to 100MB. :arg read_chunk_size: Amount of data to read at one time from the underlying transport; defaults to 64KB. :arg max_write_buffer_size: Amount of outgoing data to buffer; defaults to unlimited. .. versionchanged:: 4.0 Add the ``max_write_buffer_size`` parameter. Changed default ``read_chunk_size`` to 64KB. """ self.io_loop = io_loop or ioloop.IOLoop.current() self.max_buffer_size = max_buffer_size or 104857600 # A chunk size that is too close to max_buffer_size can cause # spurious failures. self.read_chunk_size = min(read_chunk_size or 65536, self.max_buffer_size // 2) self.max_write_buffer_size = max_write_buffer_size self.error = None self._read_buffer = bytearray() self._read_buffer_pos = 0 self._read_buffer_size = 0 self._write_buffer = bytearray() self._write_buffer_pos = 0 self._write_buffer_size = 0 self._write_buffer_frozen = False self._total_write_index = 0 self._total_write_done_index = 0 self._pending_writes_while_frozen = [] self._read_delimiter = None self._read_regex = None self._read_max_bytes = None self._read_bytes = None self._read_partial = False self._read_until_close = False self._read_callback = None self._read_future = None self._streaming_callback = None self._write_callback = None self._write_futures = collections.deque() self._close_callback = None self._connect_callback = None self._connect_future = None # _ssl_connect_future should be defined in SSLIOStream # but it's here so we can clean it up in maybe_run_close_callback. # TODO: refactor that so subclasses can add additional futures # to be cancelled. self._ssl_connect_future = None self._connecting = False self._state = None self._pending_callbacks = 0 self._closed = False def fileno(self): """Returns the file descriptor for this stream.""" raise NotImplementedError() def close_fd(self): """Closes the file underlying this stream. ``close_fd`` is called by `BaseIOStream` and should not be called elsewhere; other users should call `close` instead. """ raise NotImplementedError() def write_to_fd(self, data): """Attempts to write ``data`` to the underlying file. Returns the number of bytes written. """ raise NotImplementedError() def read_from_fd(self): """Attempts to read from the underlying file. Returns ``None`` if there was nothing to read (the socket returned `~errno.EWOULDBLOCK` or equivalent), otherwise returns the data. When possible, should return no more than ``self.read_chunk_size`` bytes at a time. """ raise NotImplementedError() def get_fd_error(self): """Returns information about any error on the underlying file. This method is called after the `.IOLoop` has signaled an error on the file descriptor, and should return an Exception (such as `socket.error` with additional information, or None if no such information is available. """ return None def read_until_regex(self, regex, callback=None, max_bytes=None): """Asynchronously read until we have matched the given regex. The result includes the data that matches the regex and anything that came before it. 
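For instance (sketch; assumes a coroutine and a connected ``stream``),
reading an HTTP-style header block while capping how much may be buffered::

    header_block = yield stream.read_until_regex(b"\r?\n\r?\n", max_bytes=64 * 1024)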
If a callback is given, it will be run with the data as an argument; if not, this method returns a `.Future`. If ``max_bytes`` is not None, the connection will be closed if more than ``max_bytes`` bytes have been read and the regex is not satisfied. .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. """ future = self._set_read_callback(callback) self._read_regex = re.compile(regex) self._read_max_bytes = max_bytes try: self._try_inline_read() except UnsatisfiableReadError as e: # Handle this the same way as in _handle_events. gen_log.info("Unsatisfiable read, closing connection: %s" % e) self.close(exc_info=True) return future except: if future is not None: # Ensure that the future doesn't log an error because its # failure was never examined. future.add_done_callback(lambda f: f.exception()) raise return future def read_until(self, delimiter, callback=None, max_bytes=None): """Asynchronously read until we have found the given delimiter. The result includes all the data read including the delimiter. If a callback is given, it will be run with the data as an argument; if not, this method returns a `.Future`. If ``max_bytes`` is not None, the connection will be closed if more than ``max_bytes`` bytes have been read and the delimiter is not found. .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. """ future = self._set_read_callback(callback) self._read_delimiter = delimiter self._read_max_bytes = max_bytes try: self._try_inline_read() except UnsatisfiableReadError as e: # Handle this the same way as in _handle_events. gen_log.info("Unsatisfiable read, closing connection: %s" % e) self.close(exc_info=True) return future except: if future is not None: future.add_done_callback(lambda f: f.exception()) raise return future def read_bytes(self, num_bytes, callback=None, streaming_callback=None, partial=False): """Asynchronously read a number of bytes. If a ``streaming_callback`` is given, it will be called with chunks of data as they become available, and the final result will be empty. Otherwise, the result is all the data that was read. If a callback is given, it will be run with the data as an argument; if not, this method returns a `.Future`. If ``partial`` is true, the callback is run as soon as we have any bytes to return (but never more than ``num_bytes``) .. versionchanged:: 4.0 Added the ``partial`` argument. The callback argument is now optional and a `.Future` will be returned if it is omitted. """ future = self._set_read_callback(callback) assert isinstance(num_bytes, numbers.Integral) self._read_bytes = num_bytes self._read_partial = partial self._streaming_callback = stack_context.wrap(streaming_callback) try: self._try_inline_read() except: if future is not None: future.add_done_callback(lambda f: f.exception()) raise return future def read_until_close(self, callback=None, streaming_callback=None): """Asynchronously reads all data from the socket until it is closed. If a ``streaming_callback`` is given, it will be called with chunks of data as they become available, and the final result will be empty. Otherwise, the result is all the data that was read. If a callback is given, it will be run with the data as an argument; if not, this method returns a `.Future`. 
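For example (sketch, inside a coroutine), a download can be streamed to disk
instead of accumulating in memory::

    with open("/tmp/payload", "wb") as f:  # hypothetical destination path
        yield stream.read_until_close(streaming_callback=f.write)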
Note that if a ``streaming_callback`` is used, data will be read from the socket as quickly as it becomes available; there is no way to apply backpressure or cancel the reads. If flow control or cancellation are desired, use a loop with `read_bytes(partial=True) <.read_bytes>` instead. .. versionchanged:: 4.0 The callback argument is now optional and a `.Future` will be returned if it is omitted. """ future = self._set_read_callback(callback) self._streaming_callback = stack_context.wrap(streaming_callback) if self.closed(): if self._streaming_callback is not None: self._run_read_callback(self._read_buffer_size, True) self._run_read_callback(self._read_buffer_size, False) return future self._read_until_close = True try: self._try_inline_read() except: if future is not None: future.add_done_callback(lambda f: f.exception()) raise return future def write(self, data, callback=None): """Asynchronously write the given data to this stream. If ``callback`` is given, we call it when all of the buffered write data has been successfully written to the stream. If there was previously buffered write data and an old write callback, that callback is simply overwritten with this new callback. If no ``callback`` is given, this method returns a `.Future` that resolves (with a result of ``None``) when the write has been completed. The ``data`` argument may be of type `bytes` or `memoryview`. .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. .. versionchanged:: 4.5 Added support for `memoryview` arguments. """ self._check_closed() if data: if (self.max_write_buffer_size is not None and self._write_buffer_size + len(data) > self.max_write_buffer_size): raise StreamBufferFullError("Reached maximum write buffer size") if self._write_buffer_frozen: self._pending_writes_while_frozen.append(data) else: self._write_buffer += data self._write_buffer_size += len(data) self._total_write_index += len(data) if callback is not None: self._write_callback = stack_context.wrap(callback) future = None else: future = TracebackFuture() future.add_done_callback(lambda f: f.exception()) self._write_futures.append((self._total_write_index, future)) if not self._connecting: self._handle_write() if self._write_buffer_size: self._add_io_state(self.io_loop.WRITE) self._maybe_add_error_listener() return future def set_close_callback(self, callback): """Call the given callback when the stream is closed. This is not necessary for applications that use the `.Future` interface; all outstanding ``Futures`` will resolve with a `StreamClosedError` when the stream is closed. """ self._close_callback = stack_context.wrap(callback) self._maybe_add_error_listener() def close(self, exc_info=False): """Close this stream. If ``exc_info`` is true, set the ``error`` attribute to the current exception from `sys.exc_info` (or if ``exc_info`` is a tuple, use that instead of `sys.exc_info`). 
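For example (sketch), application code that fails while processing stream
data can close the stream and keep the triggering exception available on
``stream.error``::

    try:
        handle_chunk(chunk)  # hypothetical application handler
    except Exception:
        stream.close(exc_info=True)  # stream.error is set from sys.exc_info()
        raise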
""" if not self.closed(): if exc_info: if not isinstance(exc_info, tuple): exc_info = sys.exc_info() if any(exc_info): self.error = exc_info[1] if self._read_until_close: if (self._streaming_callback is not None and self._read_buffer_size): self._run_read_callback(self._read_buffer_size, True) self._read_until_close = False self._run_read_callback(self._read_buffer_size, False) if self._state is not None: self.io_loop.remove_handler(self.fileno()) self._state = None self.close_fd() self._closed = True self._maybe_run_close_callback() def _maybe_run_close_callback(self): # If there are pending callbacks, don't run the close callback # until they're done (see _maybe_add_error_handler) if self.closed() and self._pending_callbacks == 0: futures = [] if self._read_future is not None: futures.append(self._read_future) self._read_future = None futures += [future for _, future in self._write_futures] self._write_futures.clear() if self._connect_future is not None: futures.append(self._connect_future) self._connect_future = None if self._ssl_connect_future is not None: futures.append(self._ssl_connect_future) self._ssl_connect_future = None for future in futures: future.set_exception(StreamClosedError(real_error=self.error)) if self._close_callback is not None: cb = self._close_callback self._close_callback = None self._run_callback(cb) # Delete any unfinished callbacks to break up reference cycles. self._read_callback = self._write_callback = None # Clear the buffers so they can be cleared immediately even # if the IOStream object is kept alive by a reference cycle. # TODO: Clear the read buffer too; it currently breaks some tests. self._write_buffer = None self._write_buffer_size = 0 def reading(self): """Returns true if we are currently reading from the stream.""" return self._read_callback is not None or self._read_future is not None def writing(self): """Returns true if we are currently writing to the stream.""" return self._write_buffer_size > 0 def closed(self): """Returns true if the stream has been closed.""" return self._closed def set_nodelay(self, value): """Sets the no-delay flag for this stream. By default, data written to TCP streams may be held for a time to make the most efficient use of bandwidth (according to Nagle's algorithm). The no-delay flag requests that data be written as soon as possible, even if doing so would consume additional bandwidth. This flag is currently defined only for TCP-based ``IOStreams``. .. versionadded:: 3.1 """ pass def _handle_events(self, fd, events): if self.closed(): gen_log.warning("Got events for closed stream %s", fd) return try: if self._connecting: # Most IOLoops will report a write failed connect # with the WRITE event, but SelectIOLoop reports a # READ as well so we must check for connecting before # either. self._handle_connect() if self.closed(): return if events & self.io_loop.READ: self._handle_read() if self.closed(): return if events & self.io_loop.WRITE: self._handle_write() if self.closed(): return if events & self.io_loop.ERROR: self.error = self.get_fd_error() # We may have queued up a user callback in _handle_read or # _handle_write, so don't close the IOStream until those # callbacks have had a chance to run. self.io_loop.add_callback(self.close) return state = self.io_loop.ERROR if self.reading(): state |= self.io_loop.READ if self.writing(): state |= self.io_loop.WRITE if state == self.io_loop.ERROR and self._read_buffer_size == 0: # If the connection is idle, listen for reads too so # we can tell if the connection is closed. 
If there is # data in the read buffer we won't run the close callback # yet anyway, so we don't need to listen in this case. state |= self.io_loop.READ if state != self._state: assert self._state is not None, \ "shouldn't happen: _handle_events without self._state" self._state = state self.io_loop.update_handler(self.fileno(), self._state) except UnsatisfiableReadError as e: gen_log.info("Unsatisfiable read, closing connection: %s" % e) self.close(exc_info=True) except Exception: gen_log.error("Uncaught exception, closing connection.", exc_info=True) self.close(exc_info=True) raise def _run_callback(self, callback, *args): def wrapper(): self._pending_callbacks -= 1 try: return callback(*args) except Exception: app_log.error("Uncaught exception, closing connection.", exc_info=True) # Close the socket on an uncaught exception from a user callback # (It would eventually get closed when the socket object is # gc'd, but we don't want to rely on gc happening before we # run out of file descriptors) self.close(exc_info=True) # Re-raise the exception so that IOLoop.handle_callback_exception # can see it and log the error raise finally: self._maybe_add_error_listener() # We schedule callbacks to be run on the next IOLoop iteration # rather than running them directly for several reasons: # * Prevents unbounded stack growth when a callback calls an # IOLoop operation that immediately runs another callback # * Provides a predictable execution context for e.g. # non-reentrant mutexes # * Ensures that the try/except in wrapper() is run outside # of the application's StackContexts with stack_context.NullContext(): # stack_context was already captured in callback, we don't need to # capture it again for IOStream's wrapper. This is especially # important if the callback was pre-wrapped before entry to # IOStream (as in HTTPConnection._header_callback), as we could # capture and leak the wrong context here. self._pending_callbacks += 1 self.io_loop.add_callback(wrapper) def _read_to_buffer_loop(self): # This method is called from _handle_read and _try_inline_read. try: if self._read_bytes is not None: target_bytes = self._read_bytes elif self._read_max_bytes is not None: target_bytes = self._read_max_bytes elif self.reading(): # For read_until without max_bytes, or # read_until_close, read as much as we can before # scanning for the delimiter. target_bytes = None else: target_bytes = 0 next_find_pos = 0 # Pretend to have a pending callback so that an EOF in # _read_to_buffer doesn't trigger an immediate close # callback. At the end of this method we'll either # establish a real pending callback via # _read_from_buffer or run the close callback. # # We need two try statements here so that # pending_callbacks is decremented before the `except` # clause below (which calls `close` and does need to # trigger the callback) self._pending_callbacks += 1 while not self.closed(): # Read from the socket until we get EWOULDBLOCK or equivalent. # SSL sockets do some internal buffering, and if the data is # sitting in the SSL object's buffer select() and friends # can't see it; the only way to find out if it's there is to # try to read it. if self._read_to_buffer() == 0: break self._run_streaming_callback() # If we've read all the bytes we can use, break out of # this loop. We can't just call read_from_buffer here # because of subtle interactions with the # pending_callback and error_listener mechanisms. # # If we've reached target_bytes, we know we're done. 
if (target_bytes is not None and
                        self._read_buffer_size >= target_bytes):
                    break

                # Otherwise, we need to call the more expensive find_read_pos.
                # It's inefficient to do this on every read, so instead
                # do it on the first read and whenever the read buffer
                # size has doubled.
                if self._read_buffer_size >= next_find_pos:
                    pos = self._find_read_pos()
                    if pos is not None:
                        return pos
                    next_find_pos = self._read_buffer_size * 2
            return self._find_read_pos()
        finally:
            self._pending_callbacks -= 1

    def _handle_read(self):
        try:
            pos = self._read_to_buffer_loop()
        except UnsatisfiableReadError:
            raise
        except Exception as e:
            gen_log.warning("error on read: %s" % e)
            self.close(exc_info=True)
            return
        if pos is not None:
            self._read_from_buffer(pos)
            return
        else:
            self._maybe_run_close_callback()

    def _set_read_callback(self, callback):
        assert self._read_callback is None, "Already reading"
        assert self._read_future is None, "Already reading"
        if callback is not None:
            self._read_callback = stack_context.wrap(callback)
        else:
            self._read_future = TracebackFuture()
        return self._read_future

    def _run_read_callback(self, size, streaming):
        if streaming:
            callback = self._streaming_callback
        else:
            callback = self._read_callback
            self._read_callback = self._streaming_callback = None
            if self._read_future is not None:
                assert callback is None
                future = self._read_future
                self._read_future = None
                future.set_result(self._consume(size))
        if callback is not None:
            assert (self._read_future is None) or streaming
            self._run_callback(callback, self._consume(size))
        else:
            # If we scheduled a callback, we will add the error listener
            # afterwards. If we didn't, we have to do it now.
            self._maybe_add_error_listener()

    def _try_inline_read(self):
        """Attempt to complete the current read operation from buffered data.

        If the read can be completed without blocking, schedules the
        read callback on the next IOLoop iteration; otherwise starts
        listening for reads on the socket.
        """
        # See if we've already got the data from a previous read
        self._run_streaming_callback()
        pos = self._find_read_pos()
        if pos is not None:
            self._read_from_buffer(pos)
            return
        self._check_closed()
        try:
            pos = self._read_to_buffer_loop()
        except Exception:
            # If there was an error in _read_to_buffer, we called close()
            # already, but couldn't run the close callback because of
            # _pending_callbacks. Before we escape from this function, run
            # the close callback if applicable.
            self._maybe_run_close_callback()
            raise
        if pos is not None:
            self._read_from_buffer(pos)
            return
        # We couldn't satisfy the read inline, so either close the stream
        # or listen for new data.
        if self.closed():
            self._maybe_run_close_callback()
        else:
            self._add_io_state(ioloop.IOLoop.READ)

    def _read_to_buffer(self):
        """Reads from the socket and appends the result to the read buffer.

        Returns the number of bytes read. Returns 0 if there is nothing
        to read (i.e. the read returns EWOULDBLOCK or equivalent). On
        error closes the socket and raises an exception.
        """
        while True:
            try:
                chunk = self.read_from_fd()
            except (socket.error, IOError, OSError) as e:
                if errno_from_exception(e) == errno.EINTR:
                    continue
                # ssl.SSLError is a subclass of socket.error
                if self._is_connreset(e):
                    # Treat ECONNRESET as a connection close rather than
                    # an error to minimize log spam (the exception will
                    # be available on self.error for apps that care).
self.close(exc_info=True) return self.close(exc_info=True) raise break if chunk is None: return 0 self._read_buffer += chunk self._read_buffer_size += len(chunk) if self._read_buffer_size > self.max_buffer_size: gen_log.error("Reached maximum read buffer size") self.close() raise StreamBufferFullError("Reached maximum read buffer size") return len(chunk) def _run_streaming_callback(self): if self._streaming_callback is not None and self._read_buffer_size: bytes_to_consume = self._read_buffer_size if self._read_bytes is not None: bytes_to_consume = min(self._read_bytes, bytes_to_consume) self._read_bytes -= bytes_to_consume self._run_read_callback(bytes_to_consume, True) def _read_from_buffer(self, pos): """Attempts to complete the currently-pending read from the buffer. The argument is either a position in the read buffer or None, as returned by _find_read_pos. """ self._read_bytes = self._read_delimiter = self._read_regex = None self._read_partial = False self._run_read_callback(pos, False) def _find_read_pos(self): """Attempts to find a position in the read buffer that satisfies the currently-pending read. Returns a position in the buffer if the current read can be satisfied, or None if it cannot. """ if (self._read_bytes is not None and (self._read_buffer_size >= self._read_bytes or (self._read_partial and self._read_buffer_size > 0))): num_bytes = min(self._read_bytes, self._read_buffer_size) return num_bytes elif self._read_delimiter is not None: # Multi-byte delimiters (e.g. '\r\n') may straddle two # chunks in the read buffer, so we can't easily find them # without collapsing the buffer. However, since protocols # using delimited reads (as opposed to reads of a known # length) tend to be "line" oriented, the delimiter is likely # to be in the first few chunks. Merge the buffer gradually # since large merges are relatively expensive and get undone in # _consume(). if self._read_buffer: loc = self._read_buffer.find(self._read_delimiter, self._read_buffer_pos) if loc != -1: loc -= self._read_buffer_pos delimiter_len = len(self._read_delimiter) self._check_max_bytes(self._read_delimiter, loc + delimiter_len) return loc + delimiter_len self._check_max_bytes(self._read_delimiter, self._read_buffer_size) elif self._read_regex is not None: if self._read_buffer: m = self._read_regex.search(self._read_buffer, self._read_buffer_pos) if m is not None: loc = m.end() - self._read_buffer_pos self._check_max_bytes(self._read_regex, loc) return loc self._check_max_bytes(self._read_regex, self._read_buffer_size) return None def _check_max_bytes(self, delimiter, size): if (self._read_max_bytes is not None and size > self._read_max_bytes): raise UnsatisfiableReadError( "delimiter %r not found within %d bytes" % ( delimiter, self._read_max_bytes)) def _freeze_write_buffer(self, size): self._write_buffer_frozen = size def _unfreeze_write_buffer(self): self._write_buffer_frozen = False self._write_buffer += b''.join(self._pending_writes_while_frozen) self._write_buffer_size += sum(map(len, self._pending_writes_while_frozen)) self._pending_writes_while_frozen[:] = [] def _got_empty_write(self, size): """ Called when a non-blocking write() failed writing anything. Can be overridden in subclasses. 
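`SSLIOStream`, later in this module, overrides this hook to freeze the write
buffer so that OpenSSL is retried with the identical buffer; the override is
a single line::

    def _got_empty_write(self, size):
        self._freeze_write_buffer(size)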
""" def _handle_write(self): while self._write_buffer_size: assert self._write_buffer_size >= 0 try: start = self._write_buffer_pos if self._write_buffer_frozen: size = self._write_buffer_frozen elif _WINDOWS: # On windows, socket.send blows up if given a # write buffer that's too large, instead of just # returning the number of bytes it was able to # process. Therefore we must not call socket.send # with more than 128KB at a time. size = 128 * 1024 else: size = self._write_buffer_size num_bytes = self.write_to_fd( memoryview(self._write_buffer)[start:start + size]) if num_bytes == 0: self._got_empty_write(size) break self._write_buffer_pos += num_bytes self._write_buffer_size -= num_bytes # Amortized O(1) shrink # (this heuristic is implemented natively in Python 3.4+ # but is replicated here for Python 2) if self._write_buffer_pos > self._write_buffer_size: del self._write_buffer[:self._write_buffer_pos] self._write_buffer_pos = 0 if self._write_buffer_frozen: self._unfreeze_write_buffer() self._total_write_done_index += num_bytes except (socket.error, IOError, OSError) as e: if e.args[0] in _ERRNO_WOULDBLOCK: self._got_empty_write(size) break else: if not self._is_connreset(e): # Broken pipe errors are usually caused by connection # reset, and its better to not log EPIPE errors to # minimize log spam gen_log.warning("Write error on %s: %s", self.fileno(), e) self.close(exc_info=True) return while self._write_futures: index, future = self._write_futures[0] if index > self._total_write_done_index: break self._write_futures.popleft() future.set_result(None) if not self._write_buffer_size: if self._write_callback: callback = self._write_callback self._write_callback = None self._run_callback(callback) def _consume(self, loc): # Consume loc bytes from the read buffer and return them if loc == 0: return b"" assert loc <= self._read_buffer_size # Slice the bytearray buffer into bytes, without intermediate copying b = (memoryview(self._read_buffer) [self._read_buffer_pos:self._read_buffer_pos + loc] ).tobytes() self._read_buffer_pos += loc self._read_buffer_size -= loc # Amortized O(1) shrink # (this heuristic is implemented natively in Python 3.4+ # but is replicated here for Python 2) if self._read_buffer_pos > self._read_buffer_size: del self._read_buffer[:self._read_buffer_pos] self._read_buffer_pos = 0 return b def _check_closed(self): if self.closed(): raise StreamClosedError(real_error=self.error) def _maybe_add_error_listener(self): # This method is part of an optimization: to detect a connection that # is closed when we're not actively reading or writing, we must listen # for read events. However, it is inefficient to do this when the # connection is first established because we are going to read or write # immediately anyway. Instead, we insert checks at various times to # see if the connection is idle and add the read listener then. if self._pending_callbacks != 0: return if self._state is None or self._state == ioloop.IOLoop.ERROR: if self.closed(): self._maybe_run_close_callback() elif (self._read_buffer_size == 0 and self._close_callback is not None): self._add_io_state(ioloop.IOLoop.READ) def _add_io_state(self, state): """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler. Implementation notes: Reads and writes have a fast path and a slow path. The fast path reads synchronously from socket buffers, while the slow path uses `_add_io_state` to schedule an IOLoop callback. Note that in both cases, the callback is run asynchronously with `_run_callback`. 
To detect closed connections, we must have called `_add_io_state` at some point, but we want to delay this as much as possible so we don't have to set an `IOLoop.ERROR` listener that will be overwritten by the next slow-path operation. As long as there are callbacks scheduled for fast-path ops, those callbacks may do more reads. If a sequence of fast-path ops do not end in a slow-path op, (e.g. for an @asynchronous long-poll request), we must add the error handler. This is done in `_run_callback` and `write` (since the write callback is optional so we can have a fast-path write with no `_run_callback`) """ if self.closed(): # connection has been closed, so there can be no future events return if self._state is None: self._state = ioloop.IOLoop.ERROR | state with stack_context.NullContext(): self.io_loop.add_handler( self.fileno(), self._handle_events, self._state) elif not self._state & state: self._state = self._state | state self.io_loop.update_handler(self.fileno(), self._state) def _is_connreset(self, exc): """Return true if exc is ECONNRESET or equivalent. May be overridden in subclasses. """ return (isinstance(exc, (socket.error, IOError)) and errno_from_exception(exc) in _ERRNO_CONNRESET) class IOStream(BaseIOStream): r"""Socket-based `IOStream` implementation. This class supports the read and write methods from `BaseIOStream` plus a `connect` method. The ``socket`` parameter may either be connected or unconnected. For server operations the socket is the result of calling `socket.accept `. For client operations the socket is created with `socket.socket`, and may either be connected before passing it to the `IOStream` or connected with `IOStream.connect`. A very simple (and broken) HTTP client using this class: .. testcode:: import tornado.ioloop import tornado.iostream import socket def send_request(): stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n") stream.read_until(b"\r\n\r\n", on_headers) def on_headers(data): headers = {} for line in data.split(b"\r\n"): parts = line.split(b":") if len(parts) == 2: headers[parts[0].strip()] = parts[1].strip() stream.read_bytes(int(headers[b"Content-Length"]), on_body) def on_body(data): print(data) stream.close() tornado.ioloop.IOLoop.current().stop() if __name__ == '__main__': s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) stream = tornado.iostream.IOStream(s) stream.connect(("friendfeed.com", 80), send_request) tornado.ioloop.IOLoop.current().start() .. testoutput:: :hide: """ def __init__(self, socket, *args, **kwargs): self.socket = socket self.socket.setblocking(False) super(IOStream, self).__init__(*args, **kwargs) def fileno(self): return self.socket def close_fd(self): self.socket.close() self.socket = None def get_fd_error(self): errno = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) return socket.error(errno, os.strerror(errno)) def read_from_fd(self): try: chunk = self.socket.recv(self.read_chunk_size) except socket.error as e: if e.args[0] in _ERRNO_WOULDBLOCK: return None else: raise if not chunk: self.close() return None return chunk def write_to_fd(self, data): try: return self.socket.send(data) finally: # Avoid keeping to data, which can be a memoryview. # See https://github.com/tornadoweb/tornado/pull/2008 del data def connect(self, address, callback=None, server_hostname=None): """Connects the socket to a remote address without blocking. May only be called if the socket passed to the constructor was not previously connected. 
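For example (editorial sketch, callback style; ``send_request`` is a
hypothetical callback)::

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    stream = IOStream(s)
    stream.connect(("127.0.0.1", 8888), callback=send_request)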
The address parameter is in the same format as for `socket.connect ` for the type of socket passed to the IOStream constructor, e.g. an ``(ip, port)`` tuple. Hostnames are accepted here, but will be resolved synchronously and block the IOLoop. If you have a hostname instead of an IP address, the `.TCPClient` class is recommended instead of calling this method directly. `.TCPClient` will do asynchronous DNS resolution and handle both IPv4 and IPv6. If ``callback`` is specified, it will be called with no arguments when the connection is completed; if not this method returns a `.Future` (whose result after a successful connection will be the stream itself). In SSL mode, the ``server_hostname`` parameter will be used for certificate validation (unless disabled in the ``ssl_options``) and SNI (if supported; requires Python 2.7.9+). Note that it is safe to call `IOStream.write ` while the connection is pending, in which case the data will be written as soon as the connection is ready. Calling `IOStream` read methods before the socket is connected works on some platforms but is non-portable. .. versionchanged:: 4.0 If no callback is given, returns a `.Future`. .. versionchanged:: 4.2 SSL certificates are validated by default; pass ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a suitably-configured `ssl.SSLContext` to the `SSLIOStream` constructor to disable. """ self._connecting = True if callback is not None: self._connect_callback = stack_context.wrap(callback) future = None else: future = self._connect_future = TracebackFuture() try: self.socket.connect(address) except socket.error as e: # In non-blocking mode we expect connect() to raise an # exception with EINPROGRESS or EWOULDBLOCK. # # On freebsd, other errors such as ECONNREFUSED may be # returned immediately when attempting to connect to # localhost, so handle them the same way as an error # reported later in _handle_connect. if (errno_from_exception(e) not in _ERRNO_INPROGRESS and errno_from_exception(e) not in _ERRNO_WOULDBLOCK): if future is None: gen_log.warning("Connect error on fd %s: %s", self.socket.fileno(), e) self.close(exc_info=True) return future self._add_io_state(self.io_loop.WRITE) return future def start_tls(self, server_side, ssl_options=None, server_hostname=None): """Convert this `IOStream` to an `SSLIOStream`. This enables protocols that begin in clear-text mode and switch to SSL after some initial negotiation (such as the ``STARTTLS`` extension to SMTP and IMAP). This method cannot be used if there are outstanding reads or writes on the stream, or if there is any data in the IOStream's buffer (data in the operating system's socket buffer is allowed). This means it must generally be used immediately after reading or writing the last clear-text data. It can also be used immediately after connecting, before any reads or writes. The ``ssl_options`` argument may be either an `ssl.SSLContext` object or a dictionary of keyword arguments for the `ssl.wrap_socket` function. The ``server_hostname`` argument will be used for certificate validation unless disabled in the ``ssl_options``. This method returns a `.Future` whose result is the new `SSLIOStream`. After this method has been called, any other operation on the original stream is undefined. If a close callback is defined on this stream, it will be transferred to the new stream. .. versionadded:: 4.0 .. versionchanged:: 4.2 SSL certificates are validated by default; pass ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a suitably-configured `ssl.SSLContext` to disable. 
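A server-side ``STARTTLS`` step might look like this sketch (``ssl_ctx`` is a
hypothetical, suitably-configured `ssl.SSLContext`; note that the reads and
writes are yielded, so the stream is idle when the upgrade happens)::

    @gen.coroutine
    def upgrade(stream):
        yield stream.read_until(b"STARTTLS\r\n")
        yield stream.write(b"220 ready for TLS\r\n")
        stream = yield stream.start_tls(server_side=True, ssl_options=ssl_ctx)
        # continue the protocol on the returned SSLIOStream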
""" if (self._read_callback or self._read_future or self._write_callback or self._write_futures or self._connect_callback or self._connect_future or self._pending_callbacks or self._closed or self._read_buffer or self._write_buffer): raise ValueError("IOStream is not idle; cannot convert to SSL") if ssl_options is None: if server_side: ssl_options = _server_ssl_defaults else: ssl_options = _client_ssl_defaults socket = self.socket self.io_loop.remove_handler(socket) self.socket = None socket = ssl_wrap_socket(socket, ssl_options, server_hostname=server_hostname, server_side=server_side, do_handshake_on_connect=False) orig_close_callback = self._close_callback self._close_callback = None future = TracebackFuture() ssl_stream = SSLIOStream(socket, ssl_options=ssl_options, io_loop=self.io_loop) # Wrap the original close callback so we can fail our Future as well. # If we had an "unwrap" counterpart to this method we would need # to restore the original callback after our Future resolves # so that repeated wrap/unwrap calls don't build up layers. def close_callback(): if not future.done(): # Note that unlike most Futures returned by IOStream, # this one passes the underlying error through directly # instead of wrapping everything in a StreamClosedError # with a real_error attribute. This is because once the # connection is established it's more helpful to raise # the SSLError directly than to hide it behind a # StreamClosedError (and the client is expecting SSL # issues rather than network issues since this method is # named start_tls). future.set_exception(ssl_stream.error or StreamClosedError()) if orig_close_callback is not None: orig_close_callback() ssl_stream.set_close_callback(close_callback) ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream) ssl_stream.max_buffer_size = self.max_buffer_size ssl_stream.read_chunk_size = self.read_chunk_size return future def _handle_connect(self): err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) if err != 0: self.error = socket.error(err, os.strerror(err)) # IOLoop implementations may vary: some of them return # an error state before the socket becomes writable, so # in that case a connection failure would be handled by the # error path in _handle_events instead of here. if self._connect_future is None: gen_log.warning("Connect error on fd %s: %s", self.socket.fileno(), errno.errorcode[err]) self.close() return if self._connect_callback is not None: callback = self._connect_callback self._connect_callback = None self._run_callback(callback) if self._connect_future is not None: future = self._connect_future self._connect_future = None future.set_result(self) self._connecting = False def set_nodelay(self, value): if (self.socket is not None and self.socket.family in (socket.AF_INET, socket.AF_INET6)): try: self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1 if value else 0) except socket.error as e: # Sometimes setsockopt will fail if the socket is closed # at the wrong time. This can happen with HTTPServer # resetting the value to false between requests. if e.errno != errno.EINVAL and not self._is_connreset(e): raise class SSLIOStream(IOStream): """A utility class to write to and read from a non-blocking SSL socket. If the socket passed to the constructor is already connected, it should be wrapped with:: ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs) before constructing the `SSLIOStream`. Unconnected sockets will be wrapped when `IOStream.connect` is finished. 
""" def __init__(self, *args, **kwargs): """The ``ssl_options`` keyword argument may either be an `ssl.SSLContext` object or a dictionary of keywords arguments for `ssl.wrap_socket` """ self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults) super(SSLIOStream, self).__init__(*args, **kwargs) self._ssl_accepting = True self._handshake_reading = False self._handshake_writing = False self._ssl_connect_callback = None self._server_hostname = None # If the socket is already connected, attempt to start the handshake. try: self.socket.getpeername() except socket.error: pass else: # Indirectly start the handshake, which will run on the next # IOLoop iteration and then the real IO state will be set in # _handle_events. self._add_io_state(self.io_loop.WRITE) def reading(self): return self._handshake_reading or super(SSLIOStream, self).reading() def writing(self): return self._handshake_writing or super(SSLIOStream, self).writing() def _got_empty_write(self, size): # With OpenSSL, if we couldn't write the entire buffer, # the very same string object must be used on the # next call to send. Therefore we suppress # merging the write buffer after an incomplete send. # A cleaner solution would be to set # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is # not yet accessible from python # (http://bugs.python.org/issue8240) self._freeze_write_buffer(size) def _do_ssl_handshake(self): # Based on code from test_ssl.py in the python stdlib try: self._handshake_reading = False self._handshake_writing = False self.socket.do_handshake() except ssl.SSLError as err: if err.args[0] == ssl.SSL_ERROR_WANT_READ: self._handshake_reading = True return elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: self._handshake_writing = True return elif err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): return self.close(exc_info=True) elif err.args[0] == ssl.SSL_ERROR_SSL: try: peer = self.socket.getpeername() except Exception: peer = '(not connected)' gen_log.warning("SSL Error on %s %s: %s", self.socket.fileno(), peer, err) return self.close(exc_info=True) raise except socket.error as err: # Some port scans (e.g. nmap in -sT mode) have been known # to cause do_handshake to raise EBADF and ENOTCONN, so make # those errors quiet as well. # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0 if (self._is_connreset(err) or err.args[0] in (errno.EBADF, errno.ENOTCONN)): return self.close(exc_info=True) raise except AttributeError: # On Linux, if the connection was reset before the call to # wrap_socket, do_handshake will fail with an # AttributeError. return self.close(exc_info=True) else: self._ssl_accepting = False if not self._verify_cert(self.socket.getpeercert()): self.close() return self._run_ssl_connect_callback() def _run_ssl_connect_callback(self): if self._ssl_connect_callback is not None: callback = self._ssl_connect_callback self._ssl_connect_callback = None self._run_callback(callback) if self._ssl_connect_future is not None: future = self._ssl_connect_future self._ssl_connect_future = None future.set_result(self) def _verify_cert(self, peercert): """Returns True if peercert is valid according to the configured validation mode and hostname. The ssl handshake already tested the certificate for a valid CA signature; the only thing that remains is to check the hostname. 
""" if isinstance(self._ssl_options, dict): verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE) elif isinstance(self._ssl_options, ssl.SSLContext): verify_mode = self._ssl_options.verify_mode assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL) if verify_mode == ssl.CERT_NONE or self._server_hostname is None: return True cert = self.socket.getpeercert() if cert is None and verify_mode == ssl.CERT_REQUIRED: gen_log.warning("No SSL certificate given") return False try: ssl_match_hostname(peercert, self._server_hostname) except SSLCertificateError as e: gen_log.warning("Invalid SSL certificate: %s" % e) return False else: return True def _handle_read(self): if self._ssl_accepting: self._do_ssl_handshake() return super(SSLIOStream, self)._handle_read() def _handle_write(self): if self._ssl_accepting: self._do_ssl_handshake() return super(SSLIOStream, self)._handle_write() def connect(self, address, callback=None, server_hostname=None): self._server_hostname = server_hostname # Pass a dummy callback to super.connect(), which is slightly # more efficient than letting it return a Future we ignore. super(SSLIOStream, self).connect(address, callback=lambda: None) return self.wait_for_handshake(callback) def _handle_connect(self): # Call the superclass method to check for errors. super(SSLIOStream, self)._handle_connect() if self.closed(): return # When the connection is complete, wrap the socket for SSL # traffic. Note that we do this by overriding _handle_connect # instead of by passing a callback to super().connect because # user callbacks are enqueued asynchronously on the IOLoop, # but since _handle_events calls _handle_connect immediately # followed by _handle_write we need this to be synchronous. # # The IOLoop will get confused if we swap out self.socket while the # fd is registered, so remove it now and re-register after # wrap_socket(). self.io_loop.remove_handler(self.socket) old_state = self._state self._state = None self.socket = ssl_wrap_socket(self.socket, self._ssl_options, server_hostname=self._server_hostname, do_handshake_on_connect=False) self._add_io_state(old_state) def wait_for_handshake(self, callback=None): """Wait for the initial SSL handshake to complete. If a ``callback`` is given, it will be called with no arguments once the handshake is complete; otherwise this method returns a `.Future` which will resolve to the stream itself after the handshake is complete. Once the handshake is complete, information such as the peer's certificate and NPN/ALPN selections may be accessed on ``self.socket``. This method is intended for use on server-side streams or after using `IOStream.start_tls`; it should not be used with `IOStream.connect` (which already waits for the handshake to complete). It may only be called once per stream. .. 
versionadded:: 4.2 """ if (self._ssl_connect_callback is not None or self._ssl_connect_future is not None): raise RuntimeError("Already waiting") if callback is not None: self._ssl_connect_callback = stack_context.wrap(callback) future = None else: future = self._ssl_connect_future = TracebackFuture() if not self._ssl_accepting: self._run_ssl_connect_callback() return future def write_to_fd(self, data): try: return self.socket.send(data) except ssl.SSLError as e: if e.args[0] == ssl.SSL_ERROR_WANT_WRITE: # In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if # the socket is not writeable; we need to transform this into # an EWOULDBLOCK socket.error or a zero return value, # either of which will be recognized by the caller of this # method. Prior to Python 3.5, an unwriteable socket would # simply return 0 bytes written. return 0 raise finally: # Avoid keeping a reference to data, which can be a memoryview. # See https://github.com/tornadoweb/tornado/pull/2008 del data def read_from_fd(self): if self._ssl_accepting: # If the handshake hasn't finished yet, there can't be anything # to read (attempting to read may or may not raise an exception # depending on the SSL version) return None try: # SSLSocket objects have both a read() and recv() method, # while regular sockets only have recv(). # The recv() method blocks (at least in python 2.6) if it is # called when there is nothing to read, so we have to use # read() instead. chunk = self.socket.read(self.read_chunk_size) except ssl.SSLError as e: # SSLError is a subclass of socket.error, so this except # block must come first. if e.args[0] == ssl.SSL_ERROR_WANT_READ: return None else: raise except socket.error as e: if e.args[0] in _ERRNO_WOULDBLOCK: return None else: raise if not chunk: self.close() return None return chunk def _is_connreset(self, e): if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF: return True return super(SSLIOStream, self)._is_connreset(e) class PipeIOStream(BaseIOStream): """Pipe-based `IOStream` implementation. The constructor takes an integer file descriptor (such as one returned by `os.pipe`) rather than an open file object. Pipes are generally one-way, so a `PipeIOStream` can be used for reading or writing but not both. """ def __init__(self, fd, *args, **kwargs): self.fd = fd _set_nonblocking(fd) super(PipeIOStream, self).__init__(*args, **kwargs) def fileno(self): return self.fd def close_fd(self): os.close(self.fd) def write_to_fd(self, data): try: return os.write(self.fd, data) finally: # Avoid keeping a reference to data, which can be a memoryview. # See https://github.com/tornadoweb/tornado/pull/2008 del data def read_from_fd(self): try: chunk = os.read(self.fd, self.read_chunk_size) except (IOError, OSError) as e: if errno_from_exception(e) in _ERRNO_WOULDBLOCK: return None elif errno_from_exception(e) == errno.EBADF: # If the writing half of a pipe is closed, select will # report it as readable but reads will fail with EBADF. self.close(exc_info=True) return None else: raise if not chunk: self.close() return None return chunk def doctests(): import doctest return doctest.DocTestSuite() tornado-4.5.3/tornado/locale.py000066400000000000000000000475021322420601000164460ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Translation methods for generating localized strings. To load a locale and generate a translated string:: user_locale = tornado.locale.get("es_LA") print(user_locale.translate("Sign out")) `tornado.locale.get()` returns the closest matching locale, not necessarily the specific locale you requested. You can support pluralization with additional arguments to `~Locale.translate()`, e.g.:: people = [...] message = user_locale.translate( "%(list)s is online", "%(list)s are online", len(people)) print(message % {"list": user_locale.list(people)}) The first string is chosen if ``len(people) == 1``, otherwise the second string is chosen. Applications should call one of `load_translations` (which uses a simple CSV format) or `load_gettext_translations` (which uses the ``.mo`` format supported by `gettext` and related tools). If neither method is called, the `Locale.translate` method will simply return the original string. """ from __future__ import absolute_import, division, print_function import codecs import csv import datetime from io import BytesIO import numbers import os import re from tornado import escape from tornado.log import gen_log from tornado.util import PY3 from tornado._locale_data import LOCALE_NAMES _default_locale = "en_US" _translations = {} # type: dict _supported_locales = frozenset([_default_locale]) _use_gettext = False CONTEXT_SEPARATOR = "\x04" def get(*locale_codes): """Returns the closest match for the given locale codes. We iterate over all given locale codes in order. If we have a tight or a loose match for the code (e.g., "en" for "en_US"), we return the locale. Otherwise we move to the next code in the list. By default we return ``en_US`` if no translations are found for any of the specified locales. You can change the default locale with `set_default_locale()`. """ return Locale.get_closest(*locale_codes) def set_default_locale(code): """Sets the default locale. The default locale is assumed to be the language used for all strings in the system. The translations loaded from disk are mappings from the default locale to the destination locale. Consequently, you don't need to create a translation file for the default locale. """ global _default_locale global _supported_locales _default_locale = code _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) def load_translations(directory, encoding=None): """Loads translations from CSV files in a directory. Translations are strings with optional Python-style named placeholders (e.g., ``My name is %(name)s``) and their associated translations. The directory should have translation files of the form ``LOCALE.csv``, e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, translation, and an optional plural indicator. Plural indicators should be one of "plural" or "singular". A given string can have both singular and plural forms. For example ``%(name)s liked this`` may have a different verb conjugation depending on whether %(name)s is one name or a list of names. There should be two rows in the CSV file for that string, one with plural indicator "singular", and one "plural". 
For strings with no verbs that would change on translation, simply use "unknown" or the empty string (or don't include the column at all). The file is read using the `csv` module in the default "excel" dialect. In this format there should not be spaces after the commas. If no ``encoding`` parameter is given, the encoding will be detected automatically (among UTF-8 and UTF-16) if the file contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM is present. Example translation ``es_LA.csv``:: "I love you","Te amo" "%(name)s liked this","A %(name)s les gustó esto","plural" "%(name)s liked this","A %(name)s le gustó esto","singular" .. versionchanged:: 4.3 Added ``encoding`` parameter. Added support for BOM-based encoding detection, UTF-16, and UTF-8-with-BOM. """ global _translations global _supported_locales _translations = {} for path in os.listdir(directory): if not path.endswith(".csv"): continue locale, extension = path.split(".") if not re.match("[a-z]+(_[A-Z]+)?$", locale): gen_log.error("Unrecognized locale %r (path: %s)", locale, os.path.join(directory, path)) continue full_path = os.path.join(directory, path) if encoding is None: # Try to autodetect encoding based on the BOM. with open(full_path, 'rb') as f: data = f.read(len(codecs.BOM_UTF16_LE)) if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): encoding = 'utf-16' else: # utf-8-sig is "utf-8 with optional BOM". It's discouraged # in most cases but is common with CSV files because Excel # cannot read utf-8 files without a BOM. encoding = 'utf-8-sig' if PY3: # python 3: csv.reader requires a file open in text mode. # Force utf8 to avoid dependence on $LANG environment variable. f = open(full_path, "r", encoding=encoding) else: # python 2: csv can only handle byte strings (in ascii-compatible # encodings), which we decode below. Transcode everything into # utf8 before passing it to csv.reader. f = BytesIO() with codecs.open(full_path, "r", encoding=encoding) as infile: f.write(escape.utf8(infile.read())) f.seek(0) _translations[locale] = {} for i, row in enumerate(csv.reader(f)): if not row or len(row) < 2: continue row = [escape.to_unicode(c).strip() for c in row] english, translation = row[:2] if len(row) > 2: plural = row[2] or "unknown" else: plural = "unknown" if plural not in ("plural", "singular", "unknown"): gen_log.error("Unrecognized plural indicator %r in %s line %d", plural, path, i + 1) continue _translations[locale].setdefault(plural, {})[english] = translation f.close() _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) gen_log.debug("Supported locales: %s", sorted(_supported_locales)) def load_gettext_translations(directory, domain): """Loads translations from `gettext`'s locale tree Locale tree is similar to system's ``/usr/share/locale``, like:: {directory}/{lang}/LC_MESSAGES/{domain}.mo Three steps are required to have your app translated: 1. Generate POT translation file:: xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc 2. Merge against existing POT file:: msgmerge old.po mydomain.po > new.po 3. 
Compile:: msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo """ import gettext global _translations global _supported_locales global _use_gettext _translations = {} for lang in os.listdir(directory): if lang.startswith('.'): continue # skip .svn, etc if os.path.isfile(os.path.join(directory, lang)): continue try: os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo")) _translations[lang] = gettext.translation(domain, directory, languages=[lang]) except Exception as e: gen_log.error("Cannot load translation for '%s': %s", lang, str(e)) continue _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) _use_gettext = True gen_log.debug("Supported locales: %s", sorted(_supported_locales)) def get_supported_locales(): """Returns a list of all the supported locale codes.""" return _supported_locales class Locale(object): """Object representing a locale. After calling one of `load_translations` or `load_gettext_translations`, call `get` or `get_closest` to get a Locale object. """ @classmethod def get_closest(cls, *locale_codes): """Returns the closest match for the given locale code.""" for code in locale_codes: if not code: continue code = code.replace("-", "_") parts = code.split("_") if len(parts) > 2: continue elif len(parts) == 2: code = parts[0].lower() + "_" + parts[1].upper() if code in _supported_locales: return cls.get(code) if parts[0].lower() in _supported_locales: return cls.get(parts[0].lower()) return cls.get(_default_locale) @classmethod def get(cls, code): """Returns the Locale for the given locale code. If it is not supported, we raise an exception. """ if not hasattr(cls, "_cache"): cls._cache = {} if code not in cls._cache: assert code in _supported_locales translations = _translations.get(code, None) if translations is None: locale = CSVLocale(code, {}) elif _use_gettext: locale = GettextLocale(code, translations) else: locale = CSVLocale(code, translations) cls._cache[code] = locale return cls._cache[code] def __init__(self, code, translations): self.code = code self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown") self.rtl = False for prefix in ["fa", "ar", "he"]: if self.code.startswith(prefix): self.rtl = True break self.translations = translations # Initialize strings for date formatting _ = self.translate self._months = [ _("January"), _("February"), _("March"), _("April"), _("May"), _("June"), _("July"), _("August"), _("September"), _("October"), _("November"), _("December")] self._weekdays = [ _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), _("Friday"), _("Saturday"), _("Sunday")] def translate(self, message, plural_message=None, count=None): """Returns the translation for the given message for this locale. If ``plural_message`` is given, you must also provide ``count``. We return ``plural_message`` when ``count != 1``, and we return the singular form for the given message when ``count == 1``. """ raise NotImplementedError() def pgettext(self, context, message, plural_message=None, count=None): raise NotImplementedError() def format_date(self, date, gmt_offset=0, relative=True, shorter=False, full_format=False): """Formats the given date (which should be GMT). By default, we return a relative time (e.g., "2 minutes ago"). You can return an absolute date string with ``relative=False``. You can force a full format date ("July 10, 1980") with ``full_format=True``. This method is primarily intended for dates in the past. For dates in the future, we fall back to full format. 
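A usage sketch (the two-minute delta is illustrative; output depends on the current time):: user_locale = tornado.locale.get("en_US") when = datetime.datetime.utcnow() - datetime.timedelta(minutes=2) user_locale.format_date(when) # e.g. "2 minutes ago"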
""" if isinstance(date, numbers.Real): date = datetime.datetime.utcfromtimestamp(date) now = datetime.datetime.utcnow() if date > now: if relative and (date - now).seconds < 60: # Due to click skew, things are some things slightly # in the future. Round timestamps in the immediate # future down to now in relative mode. date = now else: # Otherwise, future dates always use the full format. full_format = True local_date = date - datetime.timedelta(minutes=gmt_offset) local_now = now - datetime.timedelta(minutes=gmt_offset) local_yesterday = local_now - datetime.timedelta(hours=24) difference = now - date seconds = difference.seconds days = difference.days _ = self.translate format = None if not full_format: if relative and days == 0: if seconds < 50: return _("1 second ago", "%(seconds)d seconds ago", seconds) % {"seconds": seconds} if seconds < 50 * 60: minutes = round(seconds / 60.0) return _("1 minute ago", "%(minutes)d minutes ago", minutes) % {"minutes": minutes} hours = round(seconds / (60.0 * 60)) return _("1 hour ago", "%(hours)d hours ago", hours) % {"hours": hours} if days == 0: format = _("%(time)s") elif days == 1 and local_date.day == local_yesterday.day and \ relative: format = _("yesterday") if shorter else \ _("yesterday at %(time)s") elif days < 5: format = _("%(weekday)s") if shorter else \ _("%(weekday)s at %(time)s") elif days < 334: # 11mo, since confusing for same month last year format = _("%(month_name)s %(day)s") if shorter else \ _("%(month_name)s %(day)s at %(time)s") if format is None: format = _("%(month_name)s %(day)s, %(year)s") if shorter else \ _("%(month_name)s %(day)s, %(year)s at %(time)s") tfhour_clock = self.code not in ("en", "en_US", "zh_CN") if tfhour_clock: str_time = "%d:%02d" % (local_date.hour, local_date.minute) elif self.code == "zh_CN": str_time = "%s%d:%02d" % ( (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12], local_date.hour % 12 or 12, local_date.minute) else: str_time = "%d:%02d %s" % ( local_date.hour % 12 or 12, local_date.minute, ("am", "pm")[local_date.hour >= 12]) return format % { "month_name": self._months[local_date.month - 1], "weekday": self._weekdays[local_date.weekday()], "day": str(local_date.day), "year": str(local_date.year), "time": str_time } def format_day(self, date, gmt_offset=0, dow=True): """Formats the given date as a day of week. Example: "Monday, January 22". You can remove the day of week with ``dow=False``. """ local_date = date - datetime.timedelta(minutes=gmt_offset) _ = self.translate if dow: return _("%(weekday)s, %(month_name)s %(day)s") % { "month_name": self._months[local_date.month - 1], "weekday": self._weekdays[local_date.weekday()], "day": str(local_date.day), } else: return _("%(month_name)s %(day)s") % { "month_name": self._months[local_date.month - 1], "day": str(local_date.day), } def list(self, parts): """Returns a comma-separated list for the given list of parts. The format is, e.g., "A, B and C", "A and B" or just "A" for lists of size 1. 
""" _ = self.translate if len(parts) == 0: return "" if len(parts) == 1: return parts[0] comma = u' \u0648 ' if self.code.startswith("fa") else u", " return _("%(commas)s and %(last)s") % { "commas": comma.join(parts[:-1]), "last": parts[len(parts) - 1], } def friendly_number(self, value): """Returns a comma-separated number for the given integer.""" if self.code not in ("en", "en_US"): return str(value) value = str(value) parts = [] while value: parts.append(value[-3:]) value = value[:-3] return ",".join(reversed(parts)) class CSVLocale(Locale): """Locale implementation using tornado's CSV translation format.""" def translate(self, message, plural_message=None, count=None): if plural_message is not None: assert count is not None if count != 1: message = plural_message message_dict = self.translations.get("plural", {}) else: message_dict = self.translations.get("singular", {}) else: message_dict = self.translations.get("unknown", {}) return message_dict.get(message, message) def pgettext(self, context, message, plural_message=None, count=None): if self.translations: gen_log.warning('pgettext is not supported by CSVLocale') return self.translate(message, plural_message, count) class GettextLocale(Locale): """Locale implementation using the `gettext` module.""" def __init__(self, code, translations): try: # python 2 self.ngettext = translations.ungettext self.gettext = translations.ugettext except AttributeError: # python 3 self.ngettext = translations.ngettext self.gettext = translations.gettext # self.gettext must exist before __init__ is called, since it # calls into self.translate super(GettextLocale, self).__init__(code, translations) def translate(self, message, plural_message=None, count=None): if plural_message is not None: assert count is not None return self.ngettext(message, plural_message, count) else: return self.gettext(message) def pgettext(self, context, message, plural_message=None, count=None): """Allows to set context for translation, accepts plural forms. Usage example:: pgettext("law", "right") pgettext("good", "right") Plural message example:: pgettext("organization", "club", "clubs", len(clubs)) pgettext("stick", "club", "clubs", len(clubs)) To generate POT file with context, add following options to step 1 of `load_gettext_translations` sequence:: xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 .. versionadded:: 4.2 """ if plural_message is not None: assert count is not None msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, message), "%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message), count) result = self.ngettext(*msgs_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = self.ngettext(message, plural_message, count) return result else: msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message) result = self.gettext(msg_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = message return result tornado-4.5.3/tornado/locks.py000066400000000000000000000355621322420601000163250ustar00rootroot00000000000000# Copyright 2015 The Tornado Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function import collections from tornado import gen, ioloop from tornado.concurrent import Future __all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock'] class _TimeoutGarbageCollector(object): """Base class for objects that periodically clean up timed-out waiters. Avoids memory leak in a common pattern like: while True: yield condition.wait(short_timeout) print('looping....') """ def __init__(self): self._waiters = collections.deque() # Futures. self._timeouts = 0 def _garbage_collect(self): # Occasionally clear timed-out waiters. self._timeouts += 1 if self._timeouts > 100: self._timeouts = 0 self._waiters = collections.deque( w for w in self._waiters if not w.done()) class Condition(_TimeoutGarbageCollector): """A condition allows one or more coroutines to wait until notified. Like a standard `threading.Condition`, but does not need an underlying lock that is acquired and released. With a `Condition`, coroutines can wait to be notified by other coroutines: .. testcode:: from tornado import gen from tornado.ioloop import IOLoop from tornado.locks import Condition condition = Condition() @gen.coroutine def waiter(): print("I'll wait right here") yield condition.wait() # Yield a Future. print("I'm done waiting") @gen.coroutine def notifier(): print("About to notify") condition.notify() print("Done notifying") @gen.coroutine def runner(): # Yield two Futures; wait for waiter() and notifier() to finish. yield [waiter(), notifier()] IOLoop.current().run_sync(runner) .. testoutput:: I'll wait right here About to notify Done notifying I'm done waiting `wait` takes an optional ``timeout`` argument, which is either an absolute timestamp:: io_loop = IOLoop.current() # Wait up to 1 second for a notification. yield condition.wait(timeout=io_loop.time() + 1) ...or a `datetime.timedelta` for a timeout relative to the current time:: # Wait up to 1 second. yield condition.wait(timeout=datetime.timedelta(seconds=1)) The method raises `tornado.gen.TimeoutError` if there's no notification before the deadline. """ def __init__(self): super(Condition, self).__init__() self.io_loop = ioloop.IOLoop.current() def __repr__(self): result = '<%s' % (self.__class__.__name__, ) if self._waiters: result += ' waiters[%s]' % len(self._waiters) return result + '>' def wait(self, timeout=None): """Wait for `.notify`. Returns a `.Future` that resolves ``True`` if the condition is notified, or ``False`` after a timeout. """ waiter = Future() self._waiters.append(waiter) if timeout: def on_timeout(): waiter.set_result(False) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) waiter.add_done_callback( lambda _: io_loop.remove_timeout(timeout_handle)) return waiter def notify(self, n=1): """Wake ``n`` waiters.""" waiters = [] # Waiters we plan to run right now. while n and self._waiters: waiter = self._waiters.popleft() if not waiter.done(): # Might have timed out. n -= 1 waiters.append(waiter) for waiter in waiters: waiter.set_result(True) def notify_all(self): """Wake all waiters.""" self.notify(len(self._waiters)) class Event(object): """An event blocks coroutines until its internal flag is set to True. Similar to `threading.Event`. A coroutine can wait for an event to be set. Once it is set, calls to ``yield event.wait()`` will not block unless the event has been cleared: .. 
testcode:: from tornado import gen from tornado.ioloop import IOLoop from tornado.locks import Event event = Event() @gen.coroutine def waiter(): print("Waiting for event") yield event.wait() print("Not waiting this time") yield event.wait() print("Done") @gen.coroutine def setter(): print("About to set the event") event.set() @gen.coroutine def runner(): yield [waiter(), setter()] IOLoop.current().run_sync(runner) .. testoutput:: Waiting for event About to set the event Not waiting this time Done """ def __init__(self): self._future = Future() def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, 'set' if self.is_set() else 'clear') def is_set(self): """Return ``True`` if the internal flag is true.""" return self._future.done() def set(self): """Set the internal flag to ``True``. All waiters are awakened. Calling `.wait` once the flag is set will not block. """ if not self._future.done(): self._future.set_result(None) def clear(self): """Reset the internal flag to ``False``. Calls to `.wait` will block until `.set` is called. """ if self._future.done(): self._future = Future() def wait(self, timeout=None): """Block until the internal flag is true. Returns a Future, which raises `tornado.gen.TimeoutError` after a timeout. """ if timeout is None: return self._future else: return gen.with_timeout(timeout, self._future) class _ReleasingContextManager(object): """Releases a Lock or Semaphore at the end of a "with" statement. with (yield semaphore.acquire()): pass # Now semaphore.release() has been called. """ def __init__(self, obj): self._obj = obj def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): self._obj.release() class Semaphore(_TimeoutGarbageCollector): """A lock that can be acquired a fixed number of times before blocking. A Semaphore manages a counter representing the number of `.release` calls minus the number of `.acquire` calls, plus an initial value. The `.acquire` method blocks if necessary until it can return without making the counter negative. Semaphores limit access to a shared resource. To allow access for two workers at a time: .. testsetup:: semaphore from collections import deque from tornado import gen from tornado.ioloop import IOLoop from tornado.concurrent import Future # Ensure reliable doctest output: resolve Futures one at a time. futures_q = deque([Future() for _ in range(3)]) @gen.coroutine def simulator(futures): for f in futures: yield gen.moment f.set_result(None) IOLoop.current().add_callback(simulator, list(futures_q)) def use_some_resource(): return futures_q.popleft() .. testcode:: semaphore from tornado import gen from tornado.ioloop import IOLoop from tornado.locks import Semaphore sem = Semaphore(2) @gen.coroutine def worker(worker_id): yield sem.acquire() try: print("Worker %d is working" % worker_id) yield use_some_resource() finally: print("Worker %d is done" % worker_id) sem.release() @gen.coroutine def runner(): # Join all workers. yield [worker(i) for i in range(3)] IOLoop.current().run_sync(runner) .. testoutput:: semaphore Worker 0 is working Worker 1 is working Worker 0 is done Worker 2 is working Worker 1 is done Worker 2 is done Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until the semaphore has been released once, by worker 0. `.acquire` is a context manager, so ``worker`` could be written as:: @gen.coroutine def worker(worker_id): with (yield sem.acquire()): print("Worker %d is working" % worker_id) yield use_some_resource() # Now the semaphore has been released. 
print("Worker %d is done" % worker_id) In Python 3.5, the semaphore itself can be used as an async context manager:: async def worker(worker_id): async with sem: print("Worker %d is working" % worker_id) await use_some_resource() # Now the semaphore has been released. print("Worker %d is done" % worker_id) .. versionchanged:: 4.3 Added ``async with`` support in Python 3.5. """ def __init__(self, value=1): super(Semaphore, self).__init__() if value < 0: raise ValueError('semaphore initial value must be >= 0') self._value = value def __repr__(self): res = super(Semaphore, self).__repr__() extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format( self._value) if self._waiters: extra = '{0},waiters:{1}'.format(extra, len(self._waiters)) return '<{0} [{1}]>'.format(res[1:-1], extra) def release(self): """Increment the counter and wake one waiter.""" self._value += 1 while self._waiters: waiter = self._waiters.popleft() if not waiter.done(): self._value -= 1 # If the waiter is a coroutine paused at # # with (yield semaphore.acquire()): # # then the context manager's __exit__ calls release() at the end # of the "with" block. waiter.set_result(_ReleasingContextManager(self)) break def acquire(self, timeout=None): """Decrement the counter. Returns a Future. Block if the counter is zero and wait for a `.release`. The Future raises `.TimeoutError` after the deadline. """ waiter = Future() if self._value > 0: self._value -= 1 waiter.set_result(_ReleasingContextManager(self)) else: self._waiters.append(waiter) if timeout: def on_timeout(): waiter.set_exception(gen.TimeoutError()) self._garbage_collect() io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) waiter.add_done_callback( lambda _: io_loop.remove_timeout(timeout_handle)) return waiter def __enter__(self): raise RuntimeError( "Use Semaphore like 'with (yield semaphore.acquire())', not like" " 'with semaphore'") __exit__ = __enter__ @gen.coroutine def __aenter__(self): yield self.acquire() @gen.coroutine def __aexit__(self, typ, value, tb): self.release() class BoundedSemaphore(Semaphore): """A semaphore that prevents release() being called too many times. If `.release` would increment the semaphore's value past the initial value, it raises `ValueError`. Semaphores are mostly used to guard resources with limited capacity, so a semaphore released too many times is a sign of a bug. """ def __init__(self, value=1): super(BoundedSemaphore, self).__init__(value=value) self._initial_value = value def release(self): """Increment the counter and wake one waiter.""" if self._value >= self._initial_value: raise ValueError("Semaphore released too many times") super(BoundedSemaphore, self).release() class Lock(object): """A lock for coroutines. A Lock begins unlocked, and `acquire` locks it immediately. While it is locked, a coroutine that yields `acquire` waits until another coroutine calls `release`. Releasing an unlocked lock raises `RuntimeError`. `acquire` supports the context manager protocol in all Python versions: >>> from tornado import gen, locks >>> lock = locks.Lock() >>> >>> @gen.coroutine ... def f(): ... with (yield lock.acquire()): ... # Do something holding the lock. ... pass ... ... # Now the lock is released. In Python 3.5, `Lock` also supports the async context manager protocol. Note that in this case there is no `acquire`, because ``async with`` includes both the ``yield`` and the ``acquire`` (just as it does with `threading.Lock`): >>> async def f(): # doctest: +SKIP ... 
async with lock: ... # Do something holding the lock. ... pass ... ... # Now the lock is released. .. versionchanged:: 4.3 Added ``async with`` support in Python 3.5. """ def __init__(self): self._block = BoundedSemaphore(value=1) def __repr__(self): return "<%s _block=%s>" % ( self.__class__.__name__, self._block) def acquire(self, timeout=None): """Attempt to lock. Returns a Future, which raises `tornado.gen.TimeoutError` after a timeout. """ return self._block.acquire(timeout) def release(self): """Unlock. The first coroutine in line waiting for `acquire` gets the lock. If not locked, raise a `RuntimeError`. """ try: self._block.release() except ValueError: raise RuntimeError('release unlocked lock') def __enter__(self): raise RuntimeError( "Use Lock like 'with (yield lock)', not like 'with lock'") __exit__ = __enter__ @gen.coroutine def __aenter__(self): yield self.acquire() @gen.coroutine def __aexit__(self, typ, value, tb): self.release() tornado-4.5.3/tornado/log.py000066400000000000000000000300431322420601000157600ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Logging support for Tornado. Tornado uses three logger streams: * ``tornado.access``: Per-request logging for Tornado's HTTP servers (and potentially other servers in the future) * ``tornado.application``: Logging of errors from application code (i.e. uncaught exceptions from callbacks) * ``tornado.general``: General-purpose logging, including any errors or warnings from Tornado itself. These streams may be configured independently using the standard library's `logging` module. For example, you may wish to send ``tornado.access`` logs to a separate file for analysis. """ from __future__ import absolute_import, division, print_function import logging import logging.handlers import sys from tornado.escape import _unicode from tornado.util import unicode_type, basestring_type try: import colorama except ImportError: colorama = None try: import curses # type: ignore except ImportError: curses = None # Logger objects for internal tornado use access_log = logging.getLogger("tornado.access") app_log = logging.getLogger("tornado.application") gen_log = logging.getLogger("tornado.general") def _stderr_supports_color(): try: if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): if curses: curses.setupterm() if curses.tigetnum("colors") > 0: return True elif colorama: if sys.stderr is getattr(colorama.initialise, 'wrapped_stderr', object()): return True except Exception: # Very broad exception handling because it's always better to # fall back to non-colored logs than to break at startup. pass return False def _safe_unicode(s): try: return _unicode(s) except UnicodeDecodeError: return repr(s) class LogFormatter(logging.Formatter): """Log formatter used in Tornado. Key features of this formatter are: * Color support when logging to a terminal that supports it. * Timestamps on every log line. * Robust against str/bytes encoding problems.
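A manual setup sketch for applications that do not use `tornado.options` (attaching to the root logger here is illustrative; any logger works):: handler = logging.StreamHandler() handler.setFormatter(LogFormatter()) logging.getLogger().addHandler(handler)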
This formatter is enabled automatically by `tornado.options.parse_command_line` or `tornado.options.parse_config_file` (unless ``--logging=none`` is used). Color support on Windows versions that do not support ANSI color codes is enabled by use of the colorama__ library. Applications that wish to use this must first initialize colorama with a call to ``colorama.init``. See the colorama documentation for details. __ https://pypi.python.org/pypi/colorama .. versionchanged:: 4.5 Added support for ``colorama``. Changed the constructor signature to be compatible with `logging.config.dictConfig`. """ DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s' DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S' DEFAULT_COLORS = { logging.DEBUG: 4, # Blue logging.INFO: 2, # Green logging.WARNING: 3, # Yellow logging.ERROR: 1, # Red } def __init__(self, fmt=DEFAULT_FORMAT, datefmt=DEFAULT_DATE_FORMAT, style='%', color=True, colors=DEFAULT_COLORS): r""" :arg bool color: Enables color support. :arg string fmt: Log message format. It will be applied to the attributes dict of log records. The text between ``%(color)s`` and ``%(end_color)s`` will be colored depending on the level if color support is on. :arg dict colors: color mappings from logging level to terminal color code :arg string datefmt: Datetime format. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. .. versionchanged:: 3.2 Added ``fmt`` and ``datefmt`` arguments. """ logging.Formatter.__init__(self, datefmt=datefmt) self._fmt = fmt self._colors = {} if color and _stderr_supports_color(): if curses is not None: # The curses module has some str/bytes confusion in # python3. Until version 3.2.3, most methods return # bytes, but only accept strings. In addition, we want to # output these strings with the logging module, which # works with unicode strings. The explicit calls to # unicode() below are harmless in python2 but will do the # right conversion in python 3. fg_color = (curses.tigetstr("setaf") or curses.tigetstr("setf") or "") if (3, 0) < sys.version_info < (3, 2, 3): fg_color = unicode_type(fg_color, "ascii") for levelno, code in colors.items(): self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii") self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii") else: # If curses is not present (currently we'll only get here for # colorama on windows), assume hard-coded ANSI color codes. for levelno, code in colors.items(): self._colors[levelno] = '\033[2;3%dm' % code self._normal = '\033[0m' else: self._normal = '' def format(self, record): try: message = record.getMessage() assert isinstance(message, basestring_type) # guaranteed by logging # Encoding notes: The logging module prefers to work with character # strings, but only enforces that log messages are instances of # basestring. In python 2, non-ascii bytestrings will make # their way through the logging framework until they blow up with # an unhelpful decoding error (with this formatter it happens # when we attach the prefix, but there are other opportunities for # exceptions further along in the framework). # # If a byte string makes it this far, convert it to unicode to # ensure it will make it out to the logs. Use repr() as a fallback # to ensure that all byte strings can be converted successfully, # but don't do it by default so we don't add extra quotes to ascii # bytestrings. 
This is a bit of a hacky place to do this, but # it's worth it since the encoding errors that would otherwise # result are so useless (and tornado is fond of using utf8-encoded # byte strings wherever possible). record.message = _safe_unicode(message) except Exception as e: record.message = "Bad message (%r): %r" % (e, record.__dict__) record.asctime = self.formatTime(record, self.datefmt) if record.levelno in self._colors: record.color = self._colors[record.levelno] record.end_color = self._normal else: record.color = record.end_color = '' formatted = self._fmt % record.__dict__ if record.exc_info: if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: # exc_text contains multiple lines. We need to _safe_unicode # each line separately so that non-utf8 bytes don't cause # all the newlines to turn into '\n'. lines = [formatted.rstrip()] lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n')) formatted = '\n'.join(lines) return formatted.replace("\n", "\n ") def enable_pretty_logging(options=None, logger=None): """Turns on formatted logging output as configured. This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`. """ if options is None: import tornado.options options = tornado.options.options if options.logging is None or options.logging.lower() == 'none': return if logger is None: logger = logging.getLogger() logger.setLevel(getattr(logging, options.logging.upper())) if options.log_file_prefix: rotate_mode = options.log_rotate_mode if rotate_mode == 'size': channel = logging.handlers.RotatingFileHandler( filename=options.log_file_prefix, maxBytes=options.log_file_max_size, backupCount=options.log_file_num_backups) elif rotate_mode == 'time': channel = logging.handlers.TimedRotatingFileHandler( filename=options.log_file_prefix, when=options.log_rotate_when, interval=options.log_rotate_interval, backupCount=options.log_file_num_backups) else: error_message = 'The value of log_rotate_mode option should be ' +\ '"size" or "time", not "%s".' % rotate_mode raise ValueError(error_message) channel.setFormatter(LogFormatter(color=False)) logger.addHandler(channel) if (options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers)): # Set up color if we are in a tty and curses is installed channel = logging.StreamHandler() channel.setFormatter(LogFormatter()) logger.addHandler(channel) def define_logging_options(options=None): """Add logging-related flags to ``options``. These options are present automatically on the default options instance; this method is only necessary if you have created your own `.OptionParser`. .. versionadded:: 4.2 This function existed in prior versions but was broken and undocumented until 4.2. """ if options is None: # late import to prevent cycle import tornado.options options = tornado.options.options options.define("logging", default="info", help=("Set the Python log level. If 'none', tornado won't touch the " "logging configuration."), metavar="debug|info|warning|error|none") options.define("log_to_stderr", type=bool, default=None, help=("Send log output to stderr (colorized if possible). " "By default use stderr if --log_file_prefix is not set and " "no other logging is configured.")) options.define("log_file_prefix", type=str, default=None, metavar="PATH", help=("Path prefix for log files. " "Note that if you are running multiple tornado processes, " "log_file_prefix must be different for each of them (e.g.
" "include the port number)")) options.define("log_file_max_size", type=int, default=100 * 1000 * 1000, help="max size of log files before rollover") options.define("log_file_num_backups", type=int, default=10, help="number of log files to keep") options.define("log_rotate_when", type=str, default='midnight', help=("specify the type of TimedRotatingFileHandler interval " "other options:('S', 'M', 'H', 'D', 'W0'-'W6')")) options.define("log_rotate_interval", type=int, default=1, help="The interval value of timed rotating") options.define("log_rotate_mode", type=str, default='size', help="The mode of rotating files(time or size)") options.add_parse_callback(lambda: enable_pretty_logging(options)) tornado-4.5.3/tornado/netutil.py000066400000000000000000000504521322420601000166710ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Miscellaneous network utility code.""" from __future__ import absolute_import, division, print_function import errno import os import sys import socket import stat from tornado.concurrent import dummy_executor, run_on_executor from tornado.ioloop import IOLoop from tornado.platform.auto import set_close_exec from tornado.util import PY3, Configurable, errno_from_exception try: import ssl except ImportError: # ssl is not available on Google App Engine ssl = None try: import certifi except ImportError: # certifi is optional as long as we have ssl.create_default_context. if ssl is None or hasattr(ssl, 'create_default_context'): certifi = None else: raise if PY3: xrange = range if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+ ssl_match_hostname = ssl.match_hostname SSLCertificateError = ssl.CertificateError elif ssl is None: ssl_match_hostname = SSLCertificateError = None # type: ignore else: import backports.ssl_match_hostname ssl_match_hostname = backports.ssl_match_hostname.match_hostname SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore if hasattr(ssl, 'SSLContext'): if hasattr(ssl, 'create_default_context'): # Python 2.7.9+, 3.4+ # Note that the naming of ssl.Purpose is confusing; the purpose # of a context is to authentiate the opposite side of the connection. _client_ssl_defaults = ssl.create_default_context( ssl.Purpose.SERVER_AUTH) _server_ssl_defaults = ssl.create_default_context( ssl.Purpose.CLIENT_AUTH) else: # Python 3.2-3.3 _client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23) _client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED _client_ssl_defaults.load_verify_locations(certifi.where()) _server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if hasattr(ssl, 'OP_NO_COMPRESSION'): # Disable TLS compression to avoid CRIME and related attacks. # This constant wasn't added until python 3.3. 
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION elif ssl: # Python 2.6-2.7.8 _client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED, ca_certs=certifi.where()) _server_ssl_defaults = {} else: # Google App Engine _client_ssl_defaults = dict(cert_reqs=None, ca_certs=None) _server_ssl_defaults = {} # ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode, # getaddrinfo attempts to import encodings.idna. If this is done at # module-import time, the import lock is already held by the main thread, # leading to deadlock. Avoid it by caching the idna encoder on the main # thread now. u'foo'.encode('idna') # For undiagnosed reasons, 'latin1' codec may also need to be preloaded. u'foo'.encode('latin1') # These errnos indicate that a non-blocking operation must be retried # at a later time. On most platforms they're the same value, but on # some they differ. _ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) if hasattr(errno, "WSAEWOULDBLOCK"): _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore # Default backlog used when calling sock.listen() _DEFAULT_BACKLOG = 128 def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=_DEFAULT_BACKLOG, flags=None, reuse_port=False): """Creates listening sockets bound to the given port and address. Returns a list of socket objects (multiple sockets are returned if the given address maps to multiple IP addresses, which is most common for mixed IPv4 and IPv6 use). Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen() `. ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``. ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket in the list. If your platform doesn't support this option ValueError will be raised. """ if reuse_port and not hasattr(socket, "SO_REUSEPORT"): raise ValueError("the platform doesn't support SO_REUSEPORT") sockets = [] if address == "": address = None if not socket.has_ipv6 and family == socket.AF_UNSPEC: # Python can be compiled with --disable-ipv6, which causes # operations on AF_INET6 sockets to fail, but does not # automatically exclude those results from getaddrinfo # results. # http://bugs.python.org/issue16208 family = socket.AF_INET if flags is None: flags = socket.AI_PASSIVE bound_port = None for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags)): af, socktype, proto, canonname, sockaddr = res if (sys.platform == 'darwin' and address == 'localhost' and af == socket.AF_INET6 and sockaddr[3] != 0): # Mac OS X includes a link-local address fe80::1%lo0 in the # getaddrinfo results for 'localhost'. However, the firewall # doesn't understand that this is a local address and will # prompt for access (often repeatedly, due to an apparent # bug in its ability to remember granting access to an # application). Skip these addresses. 
continue try: sock = socket.socket(af, socktype, proto) except socket.error as e: if errno_from_exception(e) == errno.EAFNOSUPPORT: continue raise set_close_exec(sock.fileno()) if os.name != 'nt': sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if reuse_port: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) if af == socket.AF_INET6: # On linux, ipv6 sockets accept ipv4 too by default, # but this makes it impossible to bind to both # 0.0.0.0 in ipv4 and :: in ipv6. On other systems, # separate sockets *must* be used to listen for both ipv4 # and ipv6. For consistency, always disable ipv4 on our # ipv6 sockets and use a separate ipv4 socket when needed. # # Python 2.x on windows doesn't have IPPROTO_IPV6. if hasattr(socket, "IPPROTO_IPV6"): sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) # automatic port allocation with port=None # should bind on the same port on IPv4 and IPv6 host, requested_port = sockaddr[:2] if requested_port == 0 and bound_port is not None: sockaddr = tuple([host, bound_port] + list(sockaddr[2:])) sock.setblocking(0) sock.bind(sockaddr) bound_port = sock.getsockname()[1] sock.listen(backlog) sockets.append(sock) return sockets if hasattr(socket, 'AF_UNIX'): def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG): """Creates a listening unix socket. If a socket with the given name already exists, it will be deleted. If any other file with that name exists, an exception will be raised. Returns a socket object (not a list of socket objects like `bind_sockets`). """ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) set_close_exec(sock.fileno()) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) try: st = os.stat(file) except OSError as err: if errno_from_exception(err) != errno.ENOENT: raise else: if stat.S_ISSOCK(st.st_mode): os.remove(file) else: raise ValueError("File %s exists and is not a socket" % file) sock.bind(file) os.chmod(file, mode) sock.listen(backlog) return sock def add_accept_handler(sock, callback, io_loop=None): """Adds an `.IOLoop` event handler to accept new connections on ``sock``. When a connection is accepted, ``callback(connection, address)`` will be run (``connection`` is a socket object, and ``address`` is the address of the other end of the connection). Note that this signature is different from the ``callback(fd, events)`` signature used for `.IOLoop` handlers. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ if io_loop is None: io_loop = IOLoop.current() def accept_handler(fd, events): # More connections may come in while we're handling callbacks; # to prevent starvation of other tasks we must limit the number # of connections we accept at a time. Ideally we would accept # up to the number of connections that were waiting when we # entered this method, but this information is not available # (and rearranging this method to call accept() as many times # as possible before running any callbacks would have adverse # effects on load balancing in multiprocess configurations). # Instead, we use the (default) listen backlog as a rough # heuristic for the number of connections we can reasonably # accept at once. for i in xrange(_DEFAULT_BACKLOG): try: connection, address = sock.accept() except socket.error as e: # _ERRNO_WOULDBLOCK indicates we have accepted every # connection that is available. if errno_from_exception(e) in _ERRNO_WOULDBLOCK: return # ECONNABORTED indicates that there was a connection # but it was closed while still in the accept queue.
# (observed on FreeBSD). if errno_from_exception(e) == errno.ECONNABORTED: continue raise set_close_exec(connection.fileno()) callback(connection, address) io_loop.add_handler(sock, accept_handler, IOLoop.READ) def is_valid_ip(ip): """Returns true if the given string is a well-formed IP address. Supports IPv4 and IPv6. """ if not ip or '\x00' in ip: # getaddrinfo resolves empty strings to localhost, and truncates # on zero bytes. return False try: res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST) return bool(res) except socket.gaierror as e: if e.args[0] == socket.EAI_NONAME: return False raise return True class Resolver(Configurable): """Configurable asynchronous DNS resolver interface. By default, a blocking implementation is used (which simply calls `socket.getaddrinfo`). An alternative implementation can be chosen with the `Resolver.configure <.Configurable.configure>` class method:: Resolver.configure('tornado.netutil.ThreadedResolver') The implementations of this interface included with Tornado are * `tornado.netutil.BlockingResolver` * `tornado.netutil.ThreadedResolver` * `tornado.netutil.OverrideResolver` * `tornado.platform.twisted.TwistedResolver` * `tornado.platform.caresresolver.CaresResolver` """ @classmethod def configurable_base(cls): return Resolver @classmethod def configurable_default(cls): return BlockingResolver def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None): """Resolves an address. The ``host`` argument is a string which may be a hostname or a literal IP address. Returns a `.Future` whose result is a list of (family, address) pairs, where address is a tuple suitable to pass to `socket.connect ` (i.e. a ``(host, port)`` pair for IPv4; additional fields may be present for IPv6). If a ``callback`` is passed, it will be run with the result as an argument when it is complete. :raises IOError: if the address cannot be resolved. .. versionchanged:: 4.4 Standardized all implementations to raise `IOError`. """ raise NotImplementedError() def close(self): """Closes the `Resolver`, freeing any resources used. .. versionadded:: 3.1 """ pass class ExecutorResolver(Resolver): """Resolver implementation using a `concurrent.futures.Executor`. Use this instead of `ThreadedResolver` when you require additional control over the executor being used. The executor will be shut down when the resolver is closed unless ``close_resolver=False``; use this if you want to reuse the same executor elsewhere. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ def initialize(self, io_loop=None, executor=None, close_executor=True): self.io_loop = io_loop or IOLoop.current() if executor is not None: self.executor = executor self.close_executor = close_executor else: self.executor = dummy_executor self.close_executor = False def close(self): if self.close_executor: self.executor.shutdown() self.executor = None @run_on_executor def resolve(self, host, port, family=socket.AF_UNSPEC): # On Solaris, getaddrinfo fails if the given port is not found # in /etc/services and no socket type is given, so we must pass # one here. The socket type used here doesn't seem to actually # matter (we discard the one we get back in the results), # so the addresses we return should still be usable with SOCK_DGRAM. 
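# For example, resolving ("localhost", 80) typically yields entries like # (socket.AF_INET, ("127.0.0.1", 80)) and, where IPv6 is configured, # (socket.AF_INET6, ("::1", 80, 0, 0)).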
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) results = [] for family, socktype, proto, canonname, address in addrinfo: results.append((family, address)) return results class BlockingResolver(ExecutorResolver): """Default `Resolver` implementation, using `socket.getaddrinfo`. The `.IOLoop` will be blocked during the resolution, although the callback will not be run until the next `.IOLoop` iteration. """ def initialize(self, io_loop=None): super(BlockingResolver, self).initialize(io_loop=io_loop) class ThreadedResolver(ExecutorResolver): """Multithreaded non-blocking `Resolver` implementation. Requires the `concurrent.futures` package to be installed (available in the standard library since Python 3.2, installable with ``pip install futures`` in older versions). The thread pool size can be configured with:: Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=10) .. versionchanged:: 3.1 All ``ThreadedResolvers`` share a single thread pool, whose size is set by the first one to be created. """ _threadpool = None # type: ignore _threadpool_pid = None # type: int def initialize(self, io_loop=None, num_threads=10): threadpool = ThreadedResolver._create_threadpool(num_threads) super(ThreadedResolver, self).initialize( io_loop=io_loop, executor=threadpool, close_executor=False) @classmethod def _create_threadpool(cls, num_threads): pid = os.getpid() if cls._threadpool_pid != pid: # Threads cannot survive after a fork, so if our pid isn't what it # was when we created the pool then delete it. cls._threadpool = None if cls._threadpool is None: from concurrent.futures import ThreadPoolExecutor cls._threadpool = ThreadPoolExecutor(num_threads) cls._threadpool_pid = pid return cls._threadpool class OverrideResolver(Resolver): """Wraps a resolver with a mapping of overrides. This can be used to make local DNS changes (e.g. for testing) without modifying system-wide settings. The mapping can contain either host strings or host-port pairs. """ def initialize(self, resolver, mapping): self.resolver = resolver self.mapping = mapping def close(self): self.resolver.close() def resolve(self, host, port, *args, **kwargs): if (host, port) in self.mapping: host, port = self.mapping[(host, port)] elif host in self.mapping: host = self.mapping[host] return self.resolver.resolve(host, port, *args, **kwargs) # These are the keyword arguments to ssl.wrap_socket that must be translated # to their SSLContext equivalents (the other arguments are still passed # to SSLContext.wrap_socket). _SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile', 'cert_reqs', 'ca_certs', 'ciphers']) def ssl_options_to_context(ssl_options): """Try to convert an ``ssl_options`` dictionary to an `~ssl.SSLContext` object. The ``ssl_options`` dictionary contains keywords to be passed to `ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can be used instead. This function converts the dict form to its `~ssl.SSLContext` equivalent, and may be used when a component which accepts both forms needs to upgrade to the `~ssl.SSLContext` version to use features like SNI or NPN. 
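A minimal sketch (the certificate and key file names are illustrative; on Pythons without `ssl.SSLContext` the dictionary is returned unchanged):: ctx = ssl_options_to_context( dict(certfile="server.crt", keyfile="server.key"))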
""" if isinstance(ssl_options, dict): assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options if (not hasattr(ssl, 'SSLContext') or isinstance(ssl_options, ssl.SSLContext)): return ssl_options context = ssl.SSLContext( ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23)) if 'certfile' in ssl_options: context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None)) if 'cert_reqs' in ssl_options: context.verify_mode = ssl_options['cert_reqs'] if 'ca_certs' in ssl_options: context.load_verify_locations(ssl_options['ca_certs']) if 'ciphers' in ssl_options: context.set_ciphers(ssl_options['ciphers']) if hasattr(ssl, 'OP_NO_COMPRESSION'): # Disable TLS compression to avoid CRIME and related attacks. # This constant wasn't added until python 3.3. context.options |= ssl.OP_NO_COMPRESSION return context def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs): """Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to ``wrap_socket`` (either the `~ssl.SSLContext` method or the `ssl` module function as appropriate). """ context = ssl_options_to_context(ssl_options) if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext): if server_hostname is not None and getattr(ssl, 'HAS_SNI'): # Python doesn't have server-side SNI support so we can't # really unittest this, but it can be manually tested with # python3.2 -m tornado.httpclient https://sni.velox.ch return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs) else: return context.wrap_socket(socket, **kwargs) else: return ssl.wrap_socket(socket, **dict(context, **kwargs)) # type: ignore tornado-4.5.3/tornado/options.py000066400000000000000000000514721322420601000167030ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A command line parsing module that lets modules define their own options. Each module defines its own options which are added to the global option namespace, e.g.:: from tornado.options import define, options define("mysql_host", default="127.0.0.1:3306", help="Main user DB") define("memcache_hosts", default="127.0.0.1:11011", multiple=True, help="Main user memcache servers") def connect(): db = database.Connection(options.mysql_host) ... The ``main()`` method of your application does not need to be aware of all of the options used throughout your program; they are all automatically loaded when the modules are loaded. However, all modules that define options must have been imported before the command line is parsed. Your ``main()`` method can parse the command line or parse a config file with either:: tornado.options.parse_command_line() # or tornado.options.parse_config_file("/etc/server.conf") .. 
note: When using tornado.options.parse_command_line or tornado.options.parse_config_file, the only options that are set are ones that were previously defined with tornado.options.define. Command line formats are what you would expect (``--myoption=myvalue``). Config files are just Python files. Global names become options, e.g.:: myoption = "myvalue" myotheroption = "myothervalue" We support `datetimes `, `timedeltas `, ints, and floats (just pass a ``type`` kwarg to `define`). We also accept multi-value options. See the documentation for `define()` below. `tornado.options.options` is a singleton instance of `OptionParser`, and the top-level functions in this module (`define`, `parse_command_line`, etc) simply call methods on it. You may create additional `OptionParser` instances to define isolated sets of options, such as for subcommands. .. note:: By default, several options are defined that will configure the standard `logging` module when `parse_command_line` or `parse_config_file` are called. If you want Tornado to leave the logging configuration alone so you can manage it yourself, either pass ``--logging=none`` on the command line or do the following to disable it in code:: from tornado.options import options, parse_command_line options.logging = None parse_command_line() .. versionchanged:: 4.3 Dashes and underscores are fully interchangeable in option names; options can be defined, set, and read with any mix of the two. Dashes are typical for command-line usage while config files require underscores. """ from __future__ import absolute_import, division, print_function import datetime import numbers import re import sys import os import textwrap from tornado.escape import _unicode, native_str from tornado.log import define_logging_options from tornado import stack_context from tornado.util import basestring_type, exec_in class Error(Exception): """Exception raised by errors in the options module.""" pass class OptionParser(object): """A collection of options, a dictionary with object-like access. Normally accessed via static functions in the `tornado.options` module, which reference a global instance. """ def __init__(self): # we have to use self.__dict__ because we override setattr. self.__dict__['_options'] = {} self.__dict__['_parse_callbacks'] = [] self.define("help", type=bool, help="show this help information", callback=self._help_callback) def _normalize_name(self, name): return name.replace('_', '-') def __getattr__(self, name): name = self._normalize_name(name) if isinstance(self._options.get(name), _Option): return self._options[name].value() raise AttributeError("Unrecognized option %r" % name) def __setattr__(self, name, value): name = self._normalize_name(name) if isinstance(self._options.get(name), _Option): return self._options[name].set(value) raise AttributeError("Unrecognized option %r" % name) def __iter__(self): return (opt.name for opt in self._options.values()) def __contains__(self, name): name = self._normalize_name(name) return name in self._options def __getitem__(self, name): return self.__getattr__(name) def __setitem__(self, name, value): return self.__setattr__(name, value) def items(self): """A sequence of (name, value) pairs. .. versionadded:: 3.1 """ return [(opt.name, opt.value()) for name, opt in self._options.items()] def groups(self): """The set of option-groups created by ``define``. .. versionadded:: 3.1 """ return set(opt.group_name for opt in self._options.values()) def group_dict(self, group): """The names and values of options in a group. 
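        If ``group`` is empty or ``None``, all defined options are
        included; otherwise only options whose group name matches
        ``group`` are returned.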
Useful for copying options into Application settings:: from tornado.options import define, parse_command_line, options define('template_path', group='application') define('static_path', group='application') parse_command_line() application = Application( handlers, **options.group_dict('application')) .. versionadded:: 3.1 """ return dict( (opt.name, opt.value()) for name, opt in self._options.items() if not group or group == opt.group_name) def as_dict(self): """The names and values of all options. .. versionadded:: 3.1 """ return dict( (opt.name, opt.value()) for name, opt in self._options.items()) def define(self, name, default=None, type=None, help=None, metavar=None, multiple=False, group=None, callback=None): """Defines a new command line option. If ``type`` is given (one of str, float, int, datetime, or timedelta) or can be inferred from the ``default``, we parse the command line arguments based on the given type. If ``multiple`` is True, we accept comma-separated values, and the option value is always a list. For multi-value integers, we also accept the syntax ``x:y``, which turns into ``range(x, y)`` - very useful for long integer ranges. ``help`` and ``metavar`` are used to construct the automatically generated command line help string. The help message is formatted like:: --name=METAVAR help string ``group`` is used to group the defined options in logical groups. By default, command line options are grouped by the file in which they are defined. Command line option names must be unique globally. They can be parsed from the command line with `parse_command_line` or parsed from a config file with `parse_config_file`. If a ``callback`` is given, it will be run with the new value whenever the option is changed. This can be used to combine command-line and file-based options:: define("config", type=str, help="path to config file", callback=lambda path: parse_config_file(path, final=False)) With this definition, options in the file specified by ``--config`` will override options set earlier on the command line, but can be overridden by later flags. """ normalized = self._normalize_name(name) if normalized in self._options: raise Error("Option %r already defined in %s" % (normalized, self._options[normalized].file_name)) frame = sys._getframe(0) options_file = frame.f_code.co_filename # Can be called directly, or through top level define() fn, in which # case, step up above that frame to look for real caller. if (frame.f_back.f_code.co_filename == options_file and frame.f_back.f_code.co_name == 'define'): frame = frame.f_back file_name = frame.f_back.f_code.co_filename if file_name == options_file: file_name = "" if type is None: if not multiple and default is not None: type = default.__class__ else: type = str if group: group_name = group else: group_name = file_name option = _Option(name, file_name=file_name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, group_name=group_name, callback=callback) self._options[normalized] = option def parse_command_line(self, args=None, final=True): """Parses all options given on the command line (defaults to `sys.argv`). Note that ``args[0]`` is ignored since it is the program name in `sys.argv`. We return a list of all arguments that are not parsed as options. If ``final`` is ``False``, parse callbacks will not be run. This is useful for applications that wish to combine configurations from multiple sources. 
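        A sketch of the behavior, using the module-level convenience
        wrappers (the option name is illustrative)::

            define("port", default=8888, type=int)
            remaining = parse_command_line(["prog", "--port=80", "extra"])
            # options.port is now 80, and remaining == ["extra"], since
            # parsing stops at the first argument without a leading "-".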
""" if args is None: args = sys.argv remaining = [] for i in range(1, len(args)): # All things after the last option are command line arguments if not args[i].startswith("-"): remaining = args[i:] break if args[i] == "--": remaining = args[i + 1:] break arg = args[i].lstrip("-") name, equals, value = arg.partition("=") name = self._normalize_name(name) if name not in self._options: self.print_help() raise Error('Unrecognized command line option: %r' % name) option = self._options[name] if not equals: if option.type == bool: value = "true" else: raise Error('Option %r requires a value' % name) option.parse(value) if final: self.run_parse_callbacks() return remaining def parse_config_file(self, path, final=True): """Parses and loads the Python config file at the given path. If ``final`` is ``False``, parse callbacks will not be run. This is useful for applications that wish to combine configurations from multiple sources. .. versionchanged:: 4.1 Config files are now always interpreted as utf-8 instead of the system default encoding. .. versionchanged:: 4.4 The special variable ``__file__`` is available inside config files, specifying the absolute path to the config file itself. """ config = {'__file__': os.path.abspath(path)} with open(path, 'rb') as f: exec_in(native_str(f.read()), config, config) for name in config: normalized = self._normalize_name(name) if normalized in self._options: self._options[normalized].set(config[name]) if final: self.run_parse_callbacks() def print_help(self, file=None): """Prints all the command line options to stderr (or another file).""" if file is None: file = sys.stderr print("Usage: %s [OPTIONS]" % sys.argv[0], file=file) print("\nOptions:\n", file=file) by_group = {} for option in self._options.values(): by_group.setdefault(option.group_name, []).append(option) for filename, o in sorted(by_group.items()): if filename: print("\n%s options:\n" % os.path.normpath(filename), file=file) o.sort(key=lambda option: option.name) for option in o: # Always print names with dashes in a CLI context. prefix = self._normalize_name(option.name) if option.metavar: prefix += "=" + option.metavar description = option.help or "" if option.default is not None and option.default != '': description += " (default %s)" % option.default lines = textwrap.wrap(description, 79 - 35) if len(prefix) > 30 or len(lines) == 0: lines.insert(0, '') print(" --%-30s %s" % (prefix, lines[0]), file=file) for line in lines[1:]: print("%-34s %s" % (' ', line), file=file) print(file=file) def _help_callback(self, value): if value: self.print_help() sys.exit(0) def add_parse_callback(self, callback): """Adds a parse callback, to be invoked when option parsing is done.""" self._parse_callbacks.append(stack_context.wrap(callback)) def run_parse_callbacks(self): for callback in self._parse_callbacks: callback() def mockable(self): """Returns a wrapper around self that is compatible with `mock.patch `. The `mock.patch ` function (included in the standard library `unittest.mock` package since Python 3.3, or in the third-party ``mock`` package for older versions of Python) is incompatible with objects like ``options`` that override ``__getattr__`` and ``__setattr__``. This function returns an object that can be used with `mock.patch.object ` to modify option values:: with mock.patch.object(options.mockable(), 'name', value): assert options.name == value """ return _Mockable(self) class _Mockable(object): """`mock.patch` compatible wrapper for `OptionParser`. 
As of ``mock`` version 1.0.1, when an object uses ``__getattr__`` hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete the attribute it set instead of setting a new one (assuming that the object does not catpure ``__setattr__``, so the patch created a new attribute in ``__dict__``). _Mockable's getattr and setattr pass through to the underlying OptionParser, and delattr undoes the effect of a previous setattr. """ def __init__(self, options): # Modify __dict__ directly to bypass __setattr__ self.__dict__['_options'] = options self.__dict__['_originals'] = {} def __getattr__(self, name): return getattr(self._options, name) def __setattr__(self, name, value): assert name not in self._originals, "don't reuse mockable objects" self._originals[name] = getattr(self._options, name) setattr(self._options, name, value) def __delattr__(self, name): setattr(self._options, name, self._originals.pop(name)) class _Option(object): UNSET = object() def __init__(self, name, default=None, type=basestring_type, help=None, metavar=None, multiple=False, file_name=None, group_name=None, callback=None): if default is None and multiple: default = [] self.name = name self.type = type self.help = help self.metavar = metavar self.multiple = multiple self.file_name = file_name self.group_name = group_name self.callback = callback self.default = default self._value = _Option.UNSET def value(self): return self.default if self._value is _Option.UNSET else self._value def parse(self, value): _parse = { datetime.datetime: self._parse_datetime, datetime.timedelta: self._parse_timedelta, bool: self._parse_bool, basestring_type: self._parse_string, }.get(self.type, self.type) if self.multiple: self._value = [] for part in value.split(","): if issubclass(self.type, numbers.Integral): # allow ranges of the form X:Y (inclusive at both ends) lo, _, hi = part.partition(":") lo = _parse(lo) hi = _parse(hi) if hi else lo self._value.extend(range(lo, hi + 1)) else: self._value.append(_parse(part)) else: self._value = _parse(value) if self.callback is not None: self.callback(self._value) return self.value() def set(self, value): if self.multiple: if not isinstance(value, list): raise Error("Option %r is required to be a list of %s" % (self.name, self.type.__name__)) for item in value: if item is not None and not isinstance(item, self.type): raise Error("Option %r is required to be a list of %s" % (self.name, self.type.__name__)) else: if value is not None and not isinstance(value, self.type): raise Error("Option %r is required to be a %s (%s given)" % (self.name, self.type.__name__, type(value))) self._value = value if self.callback is not None: self.callback(self._value) # Supported date/time formats in our options _DATETIME_FORMATS = [ "%a %b %d %H:%M:%S %Y", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%dT%H:%M", "%Y%m%d %H:%M:%S", "%Y%m%d %H:%M", "%Y-%m-%d", "%Y%m%d", "%H:%M:%S", "%H:%M", ] def _parse_datetime(self, value): for format in self._DATETIME_FORMATS: try: return datetime.datetime.strptime(value, format) except ValueError: pass raise Error('Unrecognized date/time format: %r' % value) _TIMEDELTA_ABBREV_DICT = { 'h': 'hours', 'm': 'minutes', 'min': 'minutes', 's': 'seconds', 'sec': 'seconds', 'ms': 'milliseconds', 'us': 'microseconds', 'd': 'days', 'w': 'weeks', } _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?' 
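    # Illustrative inputs for _parse_timedelta below (a sketch based on
    # _TIMEDELTA_ABBREV_DICT above): "10s", "1.5h", "2d", or compound
    # strings such as "1h 30m"; a bare number like "45" is read as seconds.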
_TIMEDELTA_PATTERN = re.compile( r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE) def _parse_timedelta(self, value): try: sum = datetime.timedelta() start = 0 while start < len(value): m = self._TIMEDELTA_PATTERN.match(value, start) if not m: raise Exception() num = float(m.group(1)) units = m.group(2) or 'seconds' units = self._TIMEDELTA_ABBREV_DICT.get(units, units) sum += datetime.timedelta(**{units: num}) start = m.end() return sum except Exception: raise def _parse_bool(self, value): return value.lower() not in ("false", "0", "f") def _parse_string(self, value): return _unicode(value) options = OptionParser() """Global options object. All defined options are available as attributes on this object. """ def define(name, default=None, type=None, help=None, metavar=None, multiple=False, group=None, callback=None): """Defines an option in the global namespace. See `OptionParser.define`. """ return options.define(name, default=default, type=type, help=help, metavar=metavar, multiple=multiple, group=group, callback=callback) def parse_command_line(args=None, final=True): """Parses global options from the command line. See `OptionParser.parse_command_line`. """ return options.parse_command_line(args, final=final) def parse_config_file(path, final=True): """Parses global options from a config file. See `OptionParser.parse_config_file`. """ return options.parse_config_file(path, final=final) def print_help(file=None): """Prints all the command line options to stderr (or another file). See `OptionParser.print_help`. """ return options.print_help(file) def add_parse_callback(callback): """Adds a parse callback, to be invoked when option parsing is done. See `OptionParser.add_parse_callback` """ options.add_parse_callback(callback) # Default options define_logging_options(options) tornado-4.5.3/tornado/platform/000077500000000000000000000000001322420601000164515ustar00rootroot00000000000000tornado-4.5.3/tornado/platform/__init__.py000066400000000000000000000000001322420601000205500ustar00rootroot00000000000000tornado-4.5.3/tornado/platform/asyncio.py000066400000000000000000000173631322420601000205020ustar00rootroot00000000000000"""Bridges between the `asyncio` module and Tornado IOLoop. .. versionadded:: 3.2 This module integrates Tornado with the ``asyncio`` module introduced in Python 3.4 (and available `as a separate download `_ for Python 3.3). This makes it possible to combine the two libraries on the same event loop. Most applications should use `AsyncIOMainLoop` to run Tornado on the default ``asyncio`` event loop. Applications that need to run event loops on multiple threads may use `AsyncIOLoop` to create multiple loops. .. note:: Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on Windows. Use the `~asyncio.SelectorEventLoop` instead. """ from __future__ import absolute_import, division, print_function import functools import tornado.concurrent from tornado.gen import convert_yielded from tornado.ioloop import IOLoop from tornado import stack_context try: # Import the real asyncio module for py33+ first. Older versions of the # trollius backport also use this name. import asyncio # type: ignore except ImportError as e: # Asyncio itself isn't available; see if trollius is (backport to py26+). try: import trollius as asyncio # type: ignore except ImportError: # Re-raise the original asyncio error, not the trollius one. 
raise e class BaseAsyncIOLoop(IOLoop): def initialize(self, asyncio_loop, close_loop=False, **kwargs): super(BaseAsyncIOLoop, self).initialize(**kwargs) self.asyncio_loop = asyncio_loop self.close_loop = close_loop # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) self.handlers = {} # Set of fds listening for reads/writes self.readers = set() self.writers = set() self.closing = False def close(self, all_fds=False): self.closing = True for fd in list(self.handlers): fileobj, handler_func = self.handlers[fd] self.remove_handler(fd) if all_fds: self.close_fd(fileobj) if self.close_loop: self.asyncio_loop.close() def add_handler(self, fd, handler, events): fd, fileobj = self.split_fd(fd) if fd in self.handlers: raise ValueError("fd %s added twice" % fd) self.handlers[fd] = (fileobj, stack_context.wrap(handler)) if events & IOLoop.READ: self.asyncio_loop.add_reader( fd, self._handle_events, fd, IOLoop.READ) self.readers.add(fd) if events & IOLoop.WRITE: self.asyncio_loop.add_writer( fd, self._handle_events, fd, IOLoop.WRITE) self.writers.add(fd) def update_handler(self, fd, events): fd, fileobj = self.split_fd(fd) if events & IOLoop.READ: if fd not in self.readers: self.asyncio_loop.add_reader( fd, self._handle_events, fd, IOLoop.READ) self.readers.add(fd) else: if fd in self.readers: self.asyncio_loop.remove_reader(fd) self.readers.remove(fd) if events & IOLoop.WRITE: if fd not in self.writers: self.asyncio_loop.add_writer( fd, self._handle_events, fd, IOLoop.WRITE) self.writers.add(fd) else: if fd in self.writers: self.asyncio_loop.remove_writer(fd) self.writers.remove(fd) def remove_handler(self, fd): fd, fileobj = self.split_fd(fd) if fd not in self.handlers: return if fd in self.readers: self.asyncio_loop.remove_reader(fd) self.readers.remove(fd) if fd in self.writers: self.asyncio_loop.remove_writer(fd) self.writers.remove(fd) del self.handlers[fd] def _handle_events(self, fd, events): fileobj, handler_func = self.handlers[fd] handler_func(fileobj, events) def start(self): old_current = IOLoop.current(instance=False) try: self._setup_logging() self.make_current() self.asyncio_loop.run_forever() finally: if old_current is None: IOLoop.clear_current() else: old_current.make_current() def stop(self): self.asyncio_loop.stop() def call_at(self, when, callback, *args, **kwargs): # asyncio.call_at supports *args but not **kwargs, so bind them here. # We do not synchronize self.time and asyncio_loop.time, so # convert from absolute to relative. return self.asyncio_loop.call_later( max(0, when - self.time()), self._run_callback, functools.partial(stack_context.wrap(callback), *args, **kwargs)) def remove_timeout(self, timeout): timeout.cancel() def add_callback(self, callback, *args, **kwargs): if self.closing: # TODO: this is racy; we need a lock to ensure that the # loop isn't closed during call_soon_threadsafe. raise RuntimeError("IOLoop is closing") self.asyncio_loop.call_soon_threadsafe( self._run_callback, functools.partial(stack_context.wrap(callback), *args, **kwargs)) add_callback_from_signal = add_callback class AsyncIOMainLoop(BaseAsyncIOLoop): """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the current ``asyncio`` event loop (i.e. the one returned by ``asyncio.get_event_loop()``). Recommended usage:: from tornado.platform.asyncio import AsyncIOMainLoop import asyncio AsyncIOMainLoop().install() asyncio.get_event_loop().run_forever() See also :meth:`tornado.ioloop.IOLoop.install` for general notes on installing alternative IOLoops. 
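    After ``install()``, ``IOLoop.current()`` returns this loop, so
    ordinary Tornado code runs unchanged on top of asyncio. A sketch::

        AsyncIOMainLoop().install()
        IOLoop.current().add_callback(lambda: print("running on asyncio"))
        asyncio.get_event_loop().run_forever()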
""" def initialize(self, **kwargs): super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), close_loop=False, **kwargs) class AsyncIOLoop(BaseAsyncIOLoop): """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop. This class follows the usual Tornado semantics for creating new ``IOLoops``; these loops are not necessarily related to the ``asyncio`` default event loop. Recommended usage:: from tornado.ioloop import IOLoop IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop') IOLoop.current().start() Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object can be accessed with the ``asyncio_loop`` attribute. """ def initialize(self, **kwargs): loop = asyncio.new_event_loop() try: super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs) except Exception: # If initialize() does not succeed (taking ownership of the loop), # we have to close it. loop.close() raise def to_tornado_future(asyncio_future): """Convert an `asyncio.Future` to a `tornado.concurrent.Future`. .. versionadded:: 4.1 """ tf = tornado.concurrent.Future() tornado.concurrent.chain_future(asyncio_future, tf) return tf def to_asyncio_future(tornado_future): """Convert a Tornado yieldable object to an `asyncio.Future`. .. versionadded:: 4.1 .. versionchanged:: 4.3 Now accepts any yieldable object, not just `tornado.concurrent.Future`. """ tornado_future = convert_yielded(tornado_future) af = asyncio.Future() tornado.concurrent.chain_future(tornado_future, af) return af if hasattr(convert_yielded, 'register'): convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore tornado-4.5.3/tornado/platform/auto.py000066400000000000000000000035531322420601000200010ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of platform-specific functionality. For each function or class described in `tornado.platform.interface`, the appropriate platform-specific implementation exists in this module. Most code that needs access to this functionality should do e.g.:: from tornado.platform.auto import set_close_exec """ from __future__ import absolute_import, division, print_function import os if 'APPENGINE_RUNTIME' in os.environ: from tornado.platform.common import Waker def set_close_exec(fd): pass elif os.name == 'nt': from tornado.platform.common import Waker from tornado.platform.windows import set_close_exec else: from tornado.platform.posix import set_close_exec, Waker try: # monotime monkey-patches the time module to have a monotonic function # in versions of python before 3.3. import monotime # Silence pyflakes warning about this unused import monotime except ImportError: pass try: # monotonic can provide a monotonic function in versions of python before # 3.3, too. 
from monotonic import monotonic as monotonic_time except ImportError: try: from time import monotonic as monotonic_time except ImportError: monotonic_time = None __all__ = ['Waker', 'set_close_exec', 'monotonic_time'] tornado-4.5.3/tornado/platform/auto.pyi000066400000000000000000000002141322420601000201410ustar00rootroot00000000000000# auto.py is full of patterns mypy doesn't like, so for type checking # purposes we replace it with interface.py. from .interface import * tornado-4.5.3/tornado/platform/caresresolver.py000066400000000000000000000060141322420601000217030ustar00rootroot00000000000000from __future__ import absolute_import, division, print_function import pycares # type: ignore import socket from tornado import gen from tornado.ioloop import IOLoop from tornado.netutil import Resolver, is_valid_ip class CaresResolver(Resolver): """Name resolver based on the c-ares library. This is a non-blocking and non-threaded resolver. It may not produce the same results as the system resolver, but can be used for non-blocking resolution when threads cannot be used. c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is the default for ``tornado.simple_httpclient``, but other libraries may default to ``AF_UNSPEC``. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ def initialize(self, io_loop=None): self.io_loop = io_loop or IOLoop.current() self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb) self.fds = {} def _sock_state_cb(self, fd, readable, writable): state = ((IOLoop.READ if readable else 0) | (IOLoop.WRITE if writable else 0)) if not state: self.io_loop.remove_handler(fd) del self.fds[fd] elif fd in self.fds: self.io_loop.update_handler(fd, state) self.fds[fd] = state else: self.io_loop.add_handler(fd, self._handle_events, state) self.fds[fd] = state def _handle_events(self, fd, events): read_fd = pycares.ARES_SOCKET_BAD write_fd = pycares.ARES_SOCKET_BAD if events & IOLoop.READ: read_fd = fd if events & IOLoop.WRITE: write_fd = fd self.channel.process_fd(read_fd, write_fd) @gen.coroutine def resolve(self, host, port, family=0): if is_valid_ip(host): addresses = [host] else: # gethostbyname doesn't take callback as a kwarg self.channel.gethostbyname(host, family, (yield gen.Callback(1))) callback_args = yield gen.Wait(1) assert isinstance(callback_args, gen.Arguments) assert not callback_args.kwargs result, error = callback_args.args if error: raise IOError('C-Ares returned error %s: %s while resolving %s' % (error, pycares.errno.strerror(error), host)) addresses = result.addresses addrinfo = [] for address in addresses: if '.' in address: address_family = socket.AF_INET elif ':' in address: address_family = socket.AF_INET6 else: address_family = socket.AF_UNSPEC if family != socket.AF_UNSPEC and family != address_family: raise IOError('Requested socket family %d but got %d' % (family, address_family)) addrinfo.append((address_family, (address, port))) raise gen.Return(addrinfo) tornado-4.5.3/tornado/platform/common.py000066400000000000000000000076221322420601000203220ustar00rootroot00000000000000"""Lowest-common-denominator implementations of platform functionality.""" from __future__ import absolute_import, division, print_function import errno import socket import time from tornado.platform import interface from tornado.util import errno_from_exception def try_close(f): # Avoid issue #875 (race condition when using the file in another # thread). 
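    # A sketch of the strategy: retry close() a few times, sleeping
    # briefly between attempts to let the other thread finish with the
    # file, then make one final close() and let any exception propagate.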
for i in range(10): try: f.close() except IOError: # Yield to another thread time.sleep(1e-3) else: break # Try a last time and let raise f.close() class Waker(interface.Waker): """Create an OS independent asynchronous pipe. For use on platforms that don't have os.pipe() (or where pipes cannot be passed to select()), but do have sockets. This includes Windows and Jython. """ def __init__(self): from .auto import set_close_exec # Based on Zope select_trigger.py: # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py self.writer = socket.socket() set_close_exec(self.writer.fileno()) # Disable buffering -- pulling the trigger sends 1 byte, # and we want that sent immediately, to wake up ASAP. self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) count = 0 while 1: count += 1 # Bind to a local port; for efficiency, let the OS pick # a free port for us. # Unfortunately, stress tests showed that we may not # be able to connect to that port ("Address already in # use") despite that the OS picked it. This appears # to be a race bug in the Windows socket implementation. # So we loop until a connect() succeeds (almost always # on the first try). See the long thread at # http://mail.zope.org/pipermail/zope/2005-July/160433.html # for hideous details. a = socket.socket() set_close_exec(a.fileno()) a.bind(("127.0.0.1", 0)) a.listen(1) connect_address = a.getsockname() # assigned (host, port) pair try: self.writer.connect(connect_address) break # success except socket.error as detail: if (not hasattr(errno, 'WSAEADDRINUSE') or errno_from_exception(detail) != errno.WSAEADDRINUSE): # "Address already in use" is the only error # I've seen on two WinXP Pro SP2 boxes, under # Pythons 2.3.5 and 2.4.1. raise # (10048, 'Address already in use') # assert count <= 2 # never triggered in Tim's tests if count >= 10: # I've never seen it go above 2 a.close() self.writer.close() raise socket.error("Cannot bind trigger!") # Close `a` and try again. Note: I originally put a short # sleep() here, but it didn't appear to help or hurt. a.close() self.reader, addr = a.accept() set_close_exec(self.reader.fileno()) self.reader.setblocking(0) self.writer.setblocking(0) a.close() self.reader_fd = self.reader.fileno() def fileno(self): return self.reader.fileno() def write_fileno(self): return self.writer.fileno() def wake(self): try: self.writer.send(b"x") except (IOError, socket.error, ValueError): pass def consume(self): try: while True: result = self.reader.recv(1024) if not result: break except (IOError, socket.error): pass def close(self): self.reader.close() try_close(self.writer) tornado-4.5.3/tornado/platform/epoll.py000066400000000000000000000016261322420601000201430ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""EPoll-based IOLoop implementation for Linux systems.""" from __future__ import absolute_import, division, print_function import select from tornado.ioloop import PollIOLoop class EPollIOLoop(PollIOLoop): def initialize(self, **kwargs): super(EPollIOLoop, self).initialize(impl=select.epoll(), **kwargs) tornado-4.5.3/tornado/platform/interface.py000066400000000000000000000043541322420601000207710ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Interfaces for platform-specific functionality. This module exists primarily for documentation purposes and as base classes for other tornado.platform modules. Most code should import the appropriate implementation from `tornado.platform.auto`. """ from __future__ import absolute_import, division, print_function def set_close_exec(fd): """Sets the close-on-exec bit (``FD_CLOEXEC``)for a file descriptor.""" raise NotImplementedError() class Waker(object): """A socket-like object that can wake another thread from ``select()``. The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to its ``select`` (or ``epoll`` or ``kqueue``) calls. When another thread wants to wake up the loop, it calls `wake`. Once it has woken up, it will call `consume` to do any necessary per-wake cleanup. When the ``IOLoop`` is closed, it closes its waker too. """ def fileno(self): """Returns the read file descriptor for this waker. Must be suitable for use with ``select()`` or equivalent on the local platform. """ raise NotImplementedError() def write_fileno(self): """Returns the write file descriptor for this waker.""" raise NotImplementedError() def wake(self): """Triggers activity on the waker's file descriptor.""" raise NotImplementedError() def consume(self): """Called after the listen has woken up to do any necessary cleanup.""" raise NotImplementedError() def close(self): """Closes the waker's file descriptor(s).""" raise NotImplementedError() def monotonic_time(): raise NotImplementedError() tornado-4.5.3/tornado/platform/kqueue.py000066400000000000000000000065271322420601000203340ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""KQueue-based IOLoop implementation for BSD/Mac systems.""" from __future__ import absolute_import, division, print_function import select from tornado.ioloop import IOLoop, PollIOLoop assert hasattr(select, 'kqueue'), 'kqueue not supported' class _KQueue(object): """A kqueue-based event loop for BSD/Mac systems.""" def __init__(self): self._kqueue = select.kqueue() self._active = {} def fileno(self): return self._kqueue.fileno() def close(self): self._kqueue.close() def register(self, fd, events): if fd in self._active: raise IOError("fd %s already registered" % fd) self._control(fd, events, select.KQ_EV_ADD) self._active[fd] = events def modify(self, fd, events): self.unregister(fd) self.register(fd, events) def unregister(self, fd): events = self._active.pop(fd) self._control(fd, events, select.KQ_EV_DELETE) def _control(self, fd, events, flags): kevents = [] if events & IOLoop.WRITE: kevents.append(select.kevent( fd, filter=select.KQ_FILTER_WRITE, flags=flags)) if events & IOLoop.READ: kevents.append(select.kevent( fd, filter=select.KQ_FILTER_READ, flags=flags)) # Even though control() takes a list, it seems to return EINVAL # on Mac OS X (10.6) when there is more than one event in the list. for kevent in kevents: self._kqueue.control([kevent], 0) def poll(self, timeout): kevents = self._kqueue.control(None, 1000, timeout) events = {} for kevent in kevents: fd = kevent.ident if kevent.filter == select.KQ_FILTER_READ: events[fd] = events.get(fd, 0) | IOLoop.READ if kevent.filter == select.KQ_FILTER_WRITE: if kevent.flags & select.KQ_EV_EOF: # If an asynchronous connection is refused, kqueue # returns a write event with the EOF flag set. # Turn this into an error for consistency with the # other IOLoop implementations. # Note that for read events, EOF may be returned before # all data has been consumed from the socket buffer, # so we only check for EOF on write events. events[fd] = IOLoop.ERROR else: events[fd] = events.get(fd, 0) | IOLoop.WRITE if kevent.flags & select.KQ_EV_ERROR: events[fd] = events.get(fd, 0) | IOLoop.ERROR return events.items() class KQueueIOLoop(PollIOLoop): def initialize(self, **kwargs): super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs) tornado-4.5.3/tornado/platform/posix.py000066400000000000000000000035231322420601000201700ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Posix implementations of platform-specific functionality.""" from __future__ import absolute_import, division, print_function import fcntl import os from tornado.platform import common, interface def set_close_exec(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def _set_nonblocking(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) class Waker(interface.Waker): def __init__(self): r, w = os.pipe() _set_nonblocking(r) _set_nonblocking(w) set_close_exec(r) set_close_exec(w) self.reader = os.fdopen(r, "rb", 0) self.writer = os.fdopen(w, "wb", 0) def fileno(self): return self.reader.fileno() def write_fileno(self): return self.writer.fileno() def wake(self): try: self.writer.write(b"x") except (IOError, ValueError): pass def consume(self): try: while True: result = self.reader.read() if not result: break except IOError: pass def close(self): self.reader.close() common.try_close(self.writer) tornado-4.5.3/tornado/platform/select.py000066400000000000000000000050721322420601000203060ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Select-based IOLoop implementation. Used as a fallback for systems that don't support epoll or kqueue. """ from __future__ import absolute_import, division, print_function import select from tornado.ioloop import IOLoop, PollIOLoop class _Select(object): """A simple, select()-based IOLoop implementation for non-Linux systems""" def __init__(self): self.read_fds = set() self.write_fds = set() self.error_fds = set() self.fd_sets = (self.read_fds, self.write_fds, self.error_fds) def close(self): pass def register(self, fd, events): if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds: raise IOError("fd %s already registered" % fd) if events & IOLoop.READ: self.read_fds.add(fd) if events & IOLoop.WRITE: self.write_fds.add(fd) if events & IOLoop.ERROR: self.error_fds.add(fd) # Closed connections are reported as errors by epoll and kqueue, # but as zero-byte reads by select, so when errors are requested # we need to listen for both read and error. 
# self.read_fds.add(fd) def modify(self, fd, events): self.unregister(fd) self.register(fd, events) def unregister(self, fd): self.read_fds.discard(fd) self.write_fds.discard(fd) self.error_fds.discard(fd) def poll(self, timeout): readable, writeable, errors = select.select( self.read_fds, self.write_fds, self.error_fds, timeout) events = {} for fd in readable: events[fd] = events.get(fd, 0) | IOLoop.READ for fd in writeable: events[fd] = events.get(fd, 0) | IOLoop.WRITE for fd in errors: events[fd] = events.get(fd, 0) | IOLoop.ERROR return events.items() class SelectIOLoop(PollIOLoop): def initialize(self, **kwargs): super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs) tornado-4.5.3/tornado/platform/twisted.py000066400000000000000000000530051322420601000205110ustar00rootroot00000000000000# Author: Ovidiu Predescu # Date: July 2011 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Bridges between the Twisted reactor and Tornado IOLoop. This module lets you run applications and libraries written for Twisted in a Tornado application. It can be used in two modes, depending on which library's underlying event loop you want to use. This module has been tested with Twisted versions 11.0.0 and newer. """ from __future__ import absolute_import, division, print_function import datetime import functools import numbers import socket import sys import twisted.internet.abstract # type: ignore from twisted.internet.defer import Deferred # type: ignore from twisted.internet.posixbase import PosixReactorBase # type: ignore from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore from twisted.python import failure, log # type: ignore from twisted.internet import error # type: ignore import twisted.names.cache # type: ignore import twisted.names.client # type: ignore import twisted.names.hosts # type: ignore import twisted.names.resolve # type: ignore from zope.interface import implementer # type: ignore from tornado.concurrent import Future from tornado.escape import utf8 from tornado import gen import tornado.ioloop from tornado.log import app_log from tornado.netutil import Resolver from tornado.stack_context import NullContext, wrap from tornado.ioloop import IOLoop from tornado.util import timedelta_to_seconds @implementer(IDelayedCall) class TornadoDelayedCall(object): """DelayedCall object for Tornado.""" def __init__(self, reactor, seconds, f, *args, **kw): self._reactor = reactor self._func = functools.partial(f, *args, **kw) self._time = self._reactor.seconds() + seconds self._timeout = self._reactor._io_loop.add_timeout(self._time, self._called) self._active = True def _called(self): self._active = False self._reactor._removeDelayedCall(self) try: self._func() except: app_log.error("_called caught exception", exc_info=True) def getTime(self): return self._time def cancel(self): self._active = False self._reactor._io_loop.remove_timeout(self._timeout) self._reactor._removeDelayedCall(self) def delay(self, seconds): 
self._reactor._io_loop.remove_timeout(self._timeout) self._time += seconds self._timeout = self._reactor._io_loop.add_timeout(self._time, self._called) def reset(self, seconds): self._reactor._io_loop.remove_timeout(self._timeout) self._time = self._reactor.seconds() + seconds self._timeout = self._reactor._io_loop.add_timeout(self._time, self._called) def active(self): return self._active @implementer(IReactorTime, IReactorFDSet) class TornadoReactor(PosixReactorBase): """Twisted reactor built on the Tornado IOLoop. `TornadoReactor` implements the Twisted reactor interface on top of the Tornado IOLoop. To use it, simply call `install` at the beginning of the application:: import tornado.platform.twisted tornado.platform.twisted.install() from twisted.internet import reactor When the app is ready to start, call ``IOLoop.current().start()`` instead of ``reactor.run()``. It is also possible to create a non-global reactor by calling ``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if the `.IOLoop` and reactor are to be short-lived (such as those used in unit tests), additional cleanup may be required. Specifically, it is recommended to call:: reactor.fireSystemEvent('shutdown') reactor.disconnectAll() before closing the `.IOLoop`. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ def __init__(self, io_loop=None): if not io_loop: io_loop = tornado.ioloop.IOLoop.current() self._io_loop = io_loop self._readers = {} # map of reader objects to fd self._writers = {} # map of writer objects to fd self._fds = {} # a map of fd to a (reader, writer) tuple self._delayedCalls = {} PosixReactorBase.__init__(self) self.addSystemEventTrigger('during', 'shutdown', self.crash) # IOLoop.start() bypasses some of the reactor initialization. # Fire off the necessary events if they weren't already triggered # by reactor.run(). def start_if_necessary(): if not self._started: self.fireSystemEvent('startup') self._io_loop.add_callback(start_if_necessary) # IReactorTime def seconds(self): return self._io_loop.time() def callLater(self, seconds, f, *args, **kw): dc = TornadoDelayedCall(self, seconds, f, *args, **kw) self._delayedCalls[dc] = True return dc def getDelayedCalls(self): return [x for x in self._delayedCalls if x._active] def _removeDelayedCall(self, dc): if dc in self._delayedCalls: del self._delayedCalls[dc] # IReactorThreads def callFromThread(self, f, *args, **kw): assert callable(f), "%s is not callable" % f with NullContext(): # This NullContext is mainly for an edge case when running # TwistedIOLoop on top of a TornadoReactor. # TwistedIOLoop.add_callback uses reactor.callFromThread and # should not pick up additional StackContexts along the way. self._io_loop.add_callback(f, *args, **kw) # We don't need the waker code from the super class, Tornado uses # its own waker. 
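    # (The waker is the self-pipe an event loop uses to interrupt a
    # blocking poll from another thread; Tornado's IOLoop creates and
    # manages its own, so these overrides are deliberate no-ops.)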
def installWaker(self): pass def wakeUp(self): pass # IReactorFDSet def _invoke_callback(self, fd, events): if fd not in self._fds: return (reader, writer) = self._fds[fd] if reader: err = None if reader.fileno() == -1: err = error.ConnectionLost() elif events & IOLoop.READ: err = log.callWithLogger(reader, reader.doRead) if err is None and events & IOLoop.ERROR: err = error.ConnectionLost() if err is not None: self.removeReader(reader) reader.readConnectionLost(failure.Failure(err)) if writer: err = None if writer.fileno() == -1: err = error.ConnectionLost() elif events & IOLoop.WRITE: err = log.callWithLogger(writer, writer.doWrite) if err is None and events & IOLoop.ERROR: err = error.ConnectionLost() if err is not None: self.removeWriter(writer) writer.writeConnectionLost(failure.Failure(err)) def addReader(self, reader): if reader in self._readers: # Don't add the reader if it's already there return fd = reader.fileno() self._readers[reader] = fd if fd in self._fds: (_, writer) = self._fds[fd] self._fds[fd] = (reader, writer) if writer: # We already registered this fd for write events, # update it for read events as well. self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) else: with NullContext(): self._fds[fd] = (reader, None) self._io_loop.add_handler(fd, self._invoke_callback, IOLoop.READ) def addWriter(self, writer): if writer in self._writers: return fd = writer.fileno() self._writers[writer] = fd if fd in self._fds: (reader, _) = self._fds[fd] self._fds[fd] = (reader, writer) if reader: # We already registered this fd for read events, # update it for write events as well. self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) else: with NullContext(): self._fds[fd] = (None, writer) self._io_loop.add_handler(fd, self._invoke_callback, IOLoop.WRITE) def removeReader(self, reader): if reader in self._readers: fd = self._readers.pop(reader) (_, writer) = self._fds[fd] if writer: # We have a writer so we need to update the IOLoop for # write events only. self._fds[fd] = (None, writer) self._io_loop.update_handler(fd, IOLoop.WRITE) else: # Since we have no writer registered, we remove the # entry from _fds and unregister the handler from the # IOLoop del self._fds[fd] self._io_loop.remove_handler(fd) def removeWriter(self, writer): if writer in self._writers: fd = self._writers.pop(writer) (reader, _) = self._fds[fd] if reader: # We have a reader so we need to update the IOLoop for # read events only. self._fds[fd] = (reader, None) self._io_loop.update_handler(fd, IOLoop.READ) else: # Since we have no reader registered, we remove the # entry from the _fds and unregister the handler from # the IOLoop. del self._fds[fd] self._io_loop.remove_handler(fd) def removeAll(self): return self._removeAll(self._readers, self._writers) def getReaders(self): return self._readers.keys() def getWriters(self): return self._writers.keys() # The following functions are mainly used in twisted-style test cases; # it is expected that most users of the TornadoReactor will call # IOLoop.start() instead of Reactor.run(). 
def stop(self): PosixReactorBase.stop(self) fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown") self._io_loop.add_callback(fire_shutdown) def crash(self): PosixReactorBase.crash(self) self._io_loop.stop() def doIteration(self, delay): raise NotImplementedError("doIteration") def mainLoop(self): # Since this class is intended to be used in applications # where the top-level event loop is ``io_loop.start()`` rather # than ``reactor.run()``, it is implemented a little # differently than other Twisted reactors. We override # ``mainLoop`` instead of ``doIteration`` and must implement # timed call functionality on top of `.IOLoop.add_timeout` # rather than using the implementation in # ``PosixReactorBase``. self._io_loop.start() class _TestReactor(TornadoReactor): """Subclass of TornadoReactor for use in unittests. This can't go in the test.py file because of import-order dependencies with the Twisted reactor test builder. """ def __init__(self): # always use a new ioloop super(_TestReactor, self).__init__(IOLoop()) def listenTCP(self, port, factory, backlog=50, interface=''): # default to localhost to avoid firewall prompts on the mac if not interface: interface = '127.0.0.1' return super(_TestReactor, self).listenTCP( port, factory, backlog=backlog, interface=interface) def listenUDP(self, port, protocol, interface='', maxPacketSize=8192): if not interface: interface = '127.0.0.1' return super(_TestReactor, self).listenUDP( port, protocol, interface=interface, maxPacketSize=maxPacketSize) def install(io_loop=None): """Install this package as the default Twisted reactor. ``install()`` must be called very early in the startup process, before most other twisted-related imports. Conversely, because it initializes the `.IOLoop`, it cannot be called before `.fork_processes` or multi-process `~.TCPServer.start`. These conflicting requirements make it difficult to use `.TornadoReactor` in multi-process mode, and an external process manager such as ``supervisord`` is recommended instead. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ if not io_loop: io_loop = tornado.ioloop.IOLoop.current() reactor = TornadoReactor(io_loop) from twisted.internet.main import installReactor # type: ignore installReactor(reactor) return reactor @implementer(IReadDescriptor, IWriteDescriptor) class _FD(object): def __init__(self, fd, fileobj, handler): self.fd = fd self.fileobj = fileobj self.handler = handler self.reading = False self.writing = False self.lost = False def fileno(self): return self.fd def doRead(self): if not self.lost: self.handler(self.fileobj, tornado.ioloop.IOLoop.READ) def doWrite(self): if not self.lost: self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE) def connectionLost(self, reason): if not self.lost: self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR) self.lost = True def logPrefix(self): return '' class TwistedIOLoop(tornado.ioloop.IOLoop): """IOLoop implementation that runs on Twisted. `TwistedIOLoop` implements the Tornado IOLoop interface on top of the Twisted reactor. Recommended usage:: from tornado.platform.twisted import TwistedIOLoop from twisted.internet import reactor TwistedIOLoop().install() # Set up your tornado application as usual using `IOLoop.instance` reactor.run() Uses the global Twisted reactor by default. To create multiple ``TwistedIOLoops`` in the same process, you must pass a unique reactor when constructing each one. 
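    A sketch of the multi-loop case (``SelectReactor`` is just one
    possible choice of private reactor)::

        from twisted.internet.selectreactor import SelectReactor
        private_loop = TwistedIOLoop(reactor=SelectReactor())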
Not compatible with `tornado.process.Subprocess.set_exit_callback` because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict with each other. See also :meth:`tornado.ioloop.IOLoop.install` for general notes on installing alternative IOLoops. """ def initialize(self, reactor=None, **kwargs): super(TwistedIOLoop, self).initialize(**kwargs) if reactor is None: import twisted.internet.reactor # type: ignore reactor = twisted.internet.reactor self.reactor = reactor self.fds = {} def close(self, all_fds=False): fds = self.fds self.reactor.removeAll() for c in self.reactor.getDelayedCalls(): c.cancel() if all_fds: for fd in fds.values(): self.close_fd(fd.fileobj) def add_handler(self, fd, handler, events): if fd in self.fds: raise ValueError('fd %s added twice' % fd) fd, fileobj = self.split_fd(fd) self.fds[fd] = _FD(fd, fileobj, wrap(handler)) if events & tornado.ioloop.IOLoop.READ: self.fds[fd].reading = True self.reactor.addReader(self.fds[fd]) if events & tornado.ioloop.IOLoop.WRITE: self.fds[fd].writing = True self.reactor.addWriter(self.fds[fd]) def update_handler(self, fd, events): fd, fileobj = self.split_fd(fd) if events & tornado.ioloop.IOLoop.READ: if not self.fds[fd].reading: self.fds[fd].reading = True self.reactor.addReader(self.fds[fd]) else: if self.fds[fd].reading: self.fds[fd].reading = False self.reactor.removeReader(self.fds[fd]) if events & tornado.ioloop.IOLoop.WRITE: if not self.fds[fd].writing: self.fds[fd].writing = True self.reactor.addWriter(self.fds[fd]) else: if self.fds[fd].writing: self.fds[fd].writing = False self.reactor.removeWriter(self.fds[fd]) def remove_handler(self, fd): fd, fileobj = self.split_fd(fd) if fd not in self.fds: return self.fds[fd].lost = True if self.fds[fd].reading: self.reactor.removeReader(self.fds[fd]) if self.fds[fd].writing: self.reactor.removeWriter(self.fds[fd]) del self.fds[fd] def start(self): old_current = IOLoop.current(instance=False) try: self._setup_logging() self.make_current() self.reactor.run() finally: if old_current is None: IOLoop.clear_current() else: old_current.make_current() def stop(self): self.reactor.crash() def add_timeout(self, deadline, callback, *args, **kwargs): # This method could be simplified (since tornado 4.0) by # overriding call_at instead of add_timeout, but we leave it # for now as a test of backwards-compatibility. if isinstance(deadline, numbers.Real): delay = max(deadline - self.time(), 0) elif isinstance(deadline, datetime.timedelta): delay = timedelta_to_seconds(deadline) else: raise TypeError("Unsupported deadline %r") return self.reactor.callLater( delay, self._run_callback, functools.partial(wrap(callback), *args, **kwargs)) def remove_timeout(self, timeout): if timeout.active(): timeout.cancel() def add_callback(self, callback, *args, **kwargs): self.reactor.callFromThread( self._run_callback, functools.partial(wrap(callback), *args, **kwargs)) def add_callback_from_signal(self, callback, *args, **kwargs): self.add_callback(callback, *args, **kwargs) class TwistedResolver(Resolver): """Twisted-based asynchronous resolver. This is a non-blocking and non-threaded resolver. It is recommended only when threads cannot be used, since it has limitations compared to the standard ``getaddrinfo``-based `~tornado.netutil.Resolver` and `~tornado.netutil.ThreadedResolver`. Specifically, it returns at most one result, and arguments other than ``host`` and ``family`` are ignored. It may fail to resolve when ``family`` is not ``socket.AF_UNSPEC``. Requires Twisted 12.1 or newer. .. 
versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ def initialize(self, io_loop=None): self.io_loop = io_loop or IOLoop.current() # partial copy of twisted.names.client.createResolver, which doesn't # allow for a reactor to be passed in. self.reactor = tornado.platform.twisted.TornadoReactor(io_loop) host_resolver = twisted.names.hosts.Resolver('/etc/hosts') cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor) real_resolver = twisted.names.client.Resolver('/etc/resolv.conf', reactor=self.reactor) self.resolver = twisted.names.resolve.ResolverChain( [host_resolver, cache_resolver, real_resolver]) @gen.coroutine def resolve(self, host, port, family=0): # getHostByName doesn't accept IP addresses, so if the input # looks like an IP address just return it immediately. if twisted.internet.abstract.isIPAddress(host): resolved = host resolved_family = socket.AF_INET elif twisted.internet.abstract.isIPv6Address(host): resolved = host resolved_family = socket.AF_INET6 else: deferred = self.resolver.getHostByName(utf8(host)) resolved = yield gen.Task(deferred.addBoth) if isinstance(resolved, failure.Failure): try: resolved.raiseException() except twisted.names.error.DomainError as e: raise IOError(e) elif twisted.internet.abstract.isIPAddress(resolved): resolved_family = socket.AF_INET elif twisted.internet.abstract.isIPv6Address(resolved): resolved_family = socket.AF_INET6 else: resolved_family = socket.AF_UNSPEC if family != socket.AF_UNSPEC and family != resolved_family: raise Exception('Requested socket family %d but got %d' % (family, resolved_family)) result = [ (resolved_family, (resolved, port)), ] raise gen.Return(result) if hasattr(gen.convert_yielded, 'register'): @gen.convert_yielded.register(Deferred) # type: ignore def _(d): f = Future() def errback(failure): try: failure.raiseException() # Should never happen, but just in case raise Exception("errback called without error") except: f.set_exc_info(sys.exc_info()) d.addCallbacks(f.set_result, errback) return f tornado-4.5.3/tornado/platform/windows.py000066400000000000000000000012651322420601000205210ustar00rootroot00000000000000# NOTE: win32 support is currently experimental, and not recommended # for production use. from __future__ import absolute_import, division, print_function import ctypes # type: ignore import ctypes.wintypes # type: ignore # See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) SetHandleInformation.restype = ctypes.wintypes.BOOL HANDLE_FLAG_INHERIT = 0x00000001 def set_close_exec(fd): success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0) if not success: raise ctypes.WinError() tornado-4.5.3/tornado/process.py000066400000000000000000000307321322420601000166620ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utilities for working with multiple processes, including both forking the server into multiple processes and managing subprocesses. """ from __future__ import absolute_import, division, print_function import errno import os import signal import subprocess import sys import time from binascii import hexlify from tornado.concurrent import Future from tornado import ioloop from tornado.iostream import PipeIOStream from tornado.log import gen_log from tornado.platform.auto import set_close_exec from tornado import stack_context from tornado.util import errno_from_exception, PY3 try: import multiprocessing except ImportError: # Multiprocessing is not available on Google App Engine. multiprocessing = None if PY3: long = int # Re-export this exception for convenience. try: CalledProcessError = subprocess.CalledProcessError except AttributeError: # The subprocess module exists in Google App Engine, but is empty. # This module isn't very useful in that case, but it should # at least be importable. if 'APPENGINE_RUNTIME' not in os.environ: raise def cpu_count(): """Returns the number of processors on this machine.""" if multiprocessing is None: return 1 try: return multiprocessing.cpu_count() except NotImplementedError: pass try: return os.sysconf("SC_NPROCESSORS_CONF") except (AttributeError, ValueError): pass gen_log.error("Could not detect number of processors; assuming 1") return 1 def _reseed_random(): if 'random' not in sys.modules: return import random # If os.urandom is available, this method does the same thing as # random.seed (at least as of python 2.6). If os.urandom is not # available, we mix in the pid in addition to a timestamp. try: seed = long(hexlify(os.urandom(16)), 16) except NotImplementedError: seed = int(time.time() * 1000) ^ os.getpid() random.seed(seed) def _pipe_cloexec(): r, w = os.pipe() set_close_exec(r) set_close_exec(w) return r, w _task_id = None def fork_processes(num_processes, max_restarts=100): """Starts multiple worker processes. If ``num_processes`` is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If ``num_processes`` is given and > 0, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``fork_processes``. In each child process, ``fork_processes`` returns its *task id*, a number between 0 and ``num_processes``. Processes that exit abnormally (due to a signal or non-zero exit status) are restarted with the same id (up to ``max_restarts`` times). In the parent process, ``fork_processes`` returns None if all child processes have exited normally, but will otherwise only exit by throwing an exception. """ global _task_id assert _task_id is None if num_processes is None or num_processes <= 0: num_processes = cpu_count() if ioloop.IOLoop.initialized(): raise RuntimeError("Cannot run in multiple processes: IOLoop instance " "has already been initialized. 
You cannot call " "IOLoop.instance() before calling fork_processes()") gen_log.info("Starting %d processes", num_processes) children = {} def start_child(i): pid = os.fork() if pid == 0: # child process _reseed_random() global _task_id _task_id = i return i else: children[pid] = i return None for i in range(num_processes): id = start_child(i) if id is not None: return id num_restarts = 0 while children: try: pid, status = os.wait() except OSError as e: if errno_from_exception(e) == errno.EINTR: continue raise if pid not in children: continue id = children.pop(pid) if os.WIFSIGNALED(status): gen_log.warning("child %d (pid %d) killed by signal %d, restarting", id, pid, os.WTERMSIG(status)) elif os.WEXITSTATUS(status) != 0: gen_log.warning("child %d (pid %d) exited with status %d, restarting", id, pid, os.WEXITSTATUS(status)) else: gen_log.info("child %d (pid %d) exited normally", id, pid) continue num_restarts += 1 if num_restarts > max_restarts: raise RuntimeError("Too many child restarts, giving up") new_id = start_child(id) if new_id is not None: return new_id # All child processes exited cleanly, so exit the master process # instead of just returning to right after the call to # fork_processes (which will probably just start up another IOLoop # unless the caller checks the return value). sys.exit(0) def task_id(): """Returns the current task id, if any. Returns None if this process was not created by `fork_processes`. """ global _task_id return _task_id class Subprocess(object): """Wraps ``subprocess.Popen`` with IOStream support. The constructor is the same as ``subprocess.Popen`` with the following additions: * ``stdin``, ``stdout``, and ``stderr`` may have the value ``tornado.process.Subprocess.STREAM``, which will make the corresponding attribute of the resulting Subprocess a `.PipeIOStream`. * A new keyword argument ``io_loop`` may be used to pass in an IOLoop. The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and ``wait_for_exit`` methods do not work on Windows. There is therefore no reason to use this class instead of ``subprocess.Popen`` on that platform. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ STREAM = object() _initialized = False _waiting = {} # type: ignore def __init__(self, *args, **kwargs): self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current() # All FDs we create should be closed on error; those in to_close # should be closed in the parent process on success.
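# For each STREAM redirect requested below we create a close-on-exec
# pipe: the child inherits one end via the corresponding Popen keyword
# argument, and the parent wraps the other end in a PipeIOStream
# (exposed as self.stdin/self.stdout/self.stderr).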
pipe_fds = [] to_close = [] if kwargs.get('stdin') is Subprocess.STREAM: in_r, in_w = _pipe_cloexec() kwargs['stdin'] = in_r pipe_fds.extend((in_r, in_w)) to_close.append(in_r) self.stdin = PipeIOStream(in_w, io_loop=self.io_loop) if kwargs.get('stdout') is Subprocess.STREAM: out_r, out_w = _pipe_cloexec() kwargs['stdout'] = out_w pipe_fds.extend((out_r, out_w)) to_close.append(out_w) self.stdout = PipeIOStream(out_r, io_loop=self.io_loop) if kwargs.get('stderr') is Subprocess.STREAM: err_r, err_w = _pipe_cloexec() kwargs['stderr'] = err_w pipe_fds.extend((err_r, err_w)) to_close.append(err_w) self.stderr = PipeIOStream(err_r, io_loop=self.io_loop) try: self.proc = subprocess.Popen(*args, **kwargs) except: for fd in pipe_fds: os.close(fd) raise for fd in to_close: os.close(fd) for attr in ['stdin', 'stdout', 'stderr', 'pid']: if not hasattr(self, attr): # don't clobber streams set above setattr(self, attr, getattr(self.proc, attr)) self._exit_callback = None self.returncode = None def set_exit_callback(self, callback): """Runs ``callback`` when this process exits. The callback takes one argument, the return code of the process. This method uses a ``SIGCHLD`` handler, which is a global setting and may conflict if you have other libraries trying to handle the same signal. If you are using more than one ``IOLoop`` it may be necessary to call `Subprocess.initialize` first to designate one ``IOLoop`` to run the signal handlers. In many cases a close callback on the stdout or stderr streams can be used as an alternative to an exit callback if the signal handler is causing a problem. """ self._exit_callback = stack_context.wrap(callback) Subprocess.initialize(self.io_loop) Subprocess._waiting[self.pid] = self Subprocess._try_cleanup_process(self.pid) def wait_for_exit(self, raise_error=True): """Returns a `.Future` which resolves when the process exits. Usage:: ret = yield proc.wait_for_exit() This is a coroutine-friendly alternative to `set_exit_callback` (and a replacement for the blocking `subprocess.Popen.wait`). By default, raises `subprocess.CalledProcessError` if the process has a non-zero exit status. Use ``wait_for_exit(raise_error=False)`` to suppress this behavior and return the exit status without raising. .. versionadded:: 4.2 """ future = Future() def callback(ret): if ret != 0 and raise_error: # Unfortunately we don't have the original args any more. future.set_exception(CalledProcessError(ret, None)) else: future.set_result(ret) self.set_exit_callback(callback) return future @classmethod def initialize(cls, io_loop=None): """Initializes the ``SIGCHLD`` handler. The signal handler is run on an `.IOLoop` to avoid locking issues. Note that the `.IOLoop` used for signal handling need not be the same one used by individual Subprocess objects (as long as the ``IOLoops`` are each running in separate threads). .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. 
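For reference, a coroutine that relies on this handler end to end might look like (a sketch; assumes a Unix platform with an ``echo`` binary on ``PATH``)::

            from tornado import gen
            from tornado.process import Subprocess

            @gen.coroutine
            def run_echo():
                # wait_for_exit() below triggers Subprocess.initialize().
                proc = Subprocess(["echo", "hello"], stdout=Subprocess.STREAM)
                output = yield proc.stdout.read_until_close()
                yield proc.wait_for_exit()
                raise gen.Return(output)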
""" if cls._initialized: return if io_loop is None: io_loop = ioloop.IOLoop.current() cls._old_sigchld = signal.signal( signal.SIGCHLD, lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup)) cls._initialized = True @classmethod def uninitialize(cls): """Removes the ``SIGCHLD`` handler.""" if not cls._initialized: return signal.signal(signal.SIGCHLD, cls._old_sigchld) cls._initialized = False @classmethod def _cleanup(cls): for pid in list(cls._waiting.keys()): # make a copy cls._try_cleanup_process(pid) @classmethod def _try_cleanup_process(cls, pid): try: ret_pid, status = os.waitpid(pid, os.WNOHANG) except OSError as e: if errno_from_exception(e) == errno.ECHILD: return if ret_pid == 0: return assert ret_pid == pid subproc = cls._waiting.pop(pid) subproc.io_loop.add_callback_from_signal( subproc._set_returncode, status) def _set_returncode(self, status): if os.WIFSIGNALED(status): self.returncode = -os.WTERMSIG(status) else: assert os.WIFEXITED(status) self.returncode = os.WEXITSTATUS(status) # We've taken over wait() duty from the subprocess.Popen # object. If we don't inform it of the process's return code, # it will log a warning at destruction in python 3.6+. self.proc.returncode = self.returncode if self._exit_callback: callback = self._exit_callback self._exit_callback = None callback(self.returncode) tornado-4.5.3/tornado/queues.py000066400000000000000000000241041322420601000165070ustar00rootroot00000000000000# Copyright 2015 The Tornado Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Asynchronous queues for coroutines. .. warning:: Unlike the standard library's `queue` module, the classes defined here are *not* thread-safe. To use these queues from another thread, use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread before calling any queue methods. """ from __future__ import absolute_import, division, print_function import collections import heapq from tornado import gen, ioloop from tornado.concurrent import Future from tornado.locks import Event __all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty'] class QueueEmpty(Exception): """Raised by `.Queue.get_nowait` when the queue has no items.""" pass class QueueFull(Exception): """Raised by `.Queue.put_nowait` when a queue is at its maximum size.""" pass def _set_timeout(future, timeout): if timeout: def on_timeout(): future.set_exception(gen.TimeoutError()) io_loop = ioloop.IOLoop.current() timeout_handle = io_loop.add_timeout(timeout, on_timeout) future.add_done_callback( lambda _: io_loop.remove_timeout(timeout_handle)) class _QueueIterator(object): def __init__(self, q): self.q = q def __anext__(self): return self.q.get() class Queue(object): """Coordinate producer and consumer coroutines. If maxsize is 0 (the default) the queue size is unbounded. .. 
testcode:: from tornado import gen from tornado.ioloop import IOLoop from tornado.queues import Queue q = Queue(maxsize=2) @gen.coroutine def consumer(): while True: item = yield q.get() try: print('Doing work on %s' % item) yield gen.sleep(0.01) finally: q.task_done() @gen.coroutine def producer(): for item in range(5): yield q.put(item) print('Put %s' % item) @gen.coroutine def main(): # Start consumer without waiting (since it never finishes). IOLoop.current().spawn_callback(consumer) yield producer() # Wait for producer to put all tasks. yield q.join() # Wait for consumer to finish all tasks. print('Done') IOLoop.current().run_sync(main) .. testoutput:: Put 0 Put 1 Doing work on 0 Put 2 Doing work on 1 Put 3 Doing work on 2 Put 4 Doing work on 3 Doing work on 4 Done In Python 3.5, `Queue` implements the async iterator protocol, so ``consumer()`` could be rewritten as:: async def consumer(): async for item in q: try: print('Doing work on %s' % item) await gen.sleep(0.01) finally: q.task_done() .. versionchanged:: 4.3 Added ``async for`` support in Python 3.5. """ def __init__(self, maxsize=0): if maxsize is None: raise TypeError("maxsize can't be None") if maxsize < 0: raise ValueError("maxsize can't be negative") self._maxsize = maxsize self._init() self._getters = collections.deque([]) # Futures. self._putters = collections.deque([]) # Pairs of (item, Future). self._unfinished_tasks = 0 self._finished = Event() self._finished.set() @property def maxsize(self): """Number of items allowed in the queue.""" return self._maxsize def qsize(self): """Number of items in the queue.""" return len(self._queue) def empty(self): return not self._queue def full(self): if self.maxsize == 0: return False else: return self.qsize() >= self.maxsize def put(self, item, timeout=None): """Put an item into the queue, perhaps waiting until there is room. Returns a Future, which raises `tornado.gen.TimeoutError` after a timeout. """ try: self.put_nowait(item) except QueueFull: future = Future() self._putters.append((item, future)) _set_timeout(future, timeout) return future else: return gen._null_future def put_nowait(self, item): """Put an item into the queue without blocking. If no free slot is immediately available, raise `QueueFull`. """ self._consume_expired() if self._getters: assert self.empty(), "queue non-empty, why are getters waiting?" getter = self._getters.popleft() self.__put_internal(item) getter.set_result(self._get()) elif self.full(): raise QueueFull else: self.__put_internal(item) def get(self, timeout=None): """Remove and return an item from the queue. Returns a Future which resolves once an item is available, or raises `tornado.gen.TimeoutError` after a timeout. """ future = Future() try: future.set_result(self.get_nowait()) except QueueEmpty: self._getters.append(future) _set_timeout(future, timeout) return future def get_nowait(self): """Remove and return an item from the queue without blocking. Return an item if one is immediately available, else raise `QueueEmpty`. """ self._consume_expired() if self._putters: assert self.full(), "queue not full, why are putters waiting?" item, putter = self._putters.popleft() self.__put_internal(item) putter.set_result(None) return self._get() elif self.qsize(): return self._get() else: raise QueueEmpty def task_done(self): """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each `.get` used to fetch a task, a subsequent call to `.task_done` tells the queue that the processing on the task is complete.
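A typical consumer pairs each ``get`` with a ``task_done`` (a sketch; ``q`` is a `Queue` and ``process`` a hypothetical application function)::

            @gen.coroutine
            def worker():
                while True:
                    item = yield q.get()
                    try:
                        process(item)
                    finally:
                        q.task_done()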
If a `.join` is blocking, it resumes when all items have been processed; that is, when every `.put` is matched by a `.task_done`. Raises `ValueError` if called more times than `.put`. """ if self._unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self._unfinished_tasks -= 1 if self._unfinished_tasks == 0: self._finished.set() def join(self, timeout=None): """Block until all items in the queue are processed. Returns a Future, which raises `tornado.gen.TimeoutError` after a timeout. """ return self._finished.wait(timeout) @gen.coroutine def __aiter__(self): return _QueueIterator(self) # These three are overridable in subclasses. def _init(self): self._queue = collections.deque() def _get(self): return self._queue.popleft() def _put(self, item): self._queue.append(item) # End of the overridable methods. def __put_internal(self, item): self._unfinished_tasks += 1 self._finished.clear() self._put(item) def _consume_expired(self): # Remove timed-out waiters. while self._putters and self._putters[0][1].done(): self._putters.popleft() while self._getters and self._getters[0].done(): self._getters.popleft() def __repr__(self): return '<%s at %s %s>' % ( type(self).__name__, hex(id(self)), self._format()) def __str__(self): return '<%s %s>' % (type(self).__name__, self._format()) def _format(self): result = 'maxsize=%r' % (self.maxsize, ) if getattr(self, '_queue', None): result += ' queue=%r' % self._queue if self._getters: result += ' getters[%s]' % len(self._getters) if self._putters: result += ' putters[%s]' % len(self._putters) if self._unfinished_tasks: result += ' tasks=%s' % self._unfinished_tasks return result class PriorityQueue(Queue): """A `.Queue` that retrieves entries in priority order, lowest first. Entries are typically tuples like ``(priority number, data)``. .. testcode:: from tornado.queues import PriorityQueue q = PriorityQueue() q.put((1, 'medium-priority item')) q.put((0, 'high-priority item')) q.put((10, 'low-priority item')) print(q.get_nowait()) print(q.get_nowait()) print(q.get_nowait()) .. testoutput:: (0, 'high-priority item') (1, 'medium-priority item') (10, 'low-priority item') """ def _init(self): self._queue = [] def _put(self, item): heapq.heappush(self._queue, item) def _get(self): return heapq.heappop(self._queue) class LifoQueue(Queue): """A `.Queue` that retrieves the most recently put items first. .. testcode:: from tornado.queues import LifoQueue q = LifoQueue() q.put(3) q.put(2) q.put(1) print(q.get_nowait()) print(q.get_nowait()) print(q.get_nowait()) .. testoutput:: 1 2 3 """ def _init(self): self._queue = [] def _put(self, item): self._queue.append(item) def _get(self): return self._queue.pop() tornado-4.5.3/tornado/routing.py000066400000000000000000000523471322420601000167010ustar00rootroot00000000000000# Copyright 2015 The Tornado Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Flexible routing implementation. Tornado routes HTTP requests to appropriate handlers using `Router` class implementations. 
The `tornado.web.Application` class is a `Router` implementation and may be used directly, or the classes in this module may be used for additional flexibility. The `RuleRouter` class can match on more criteria than `.Application`, or the `Router` interface can be subclassed for maximum customization. The `Router` interface extends `~.httputil.HTTPServerConnectionDelegate` to provide additional routing capabilities. This also means that any `Router` implementation can be used directly as a ``request_callback`` for the `~.httpserver.HTTPServer` constructor. A `Router` subclass must implement a ``find_handler`` method to provide a suitable `~.httputil.HTTPMessageDelegate` instance to handle the request: .. code-block:: python class CustomRouter(Router): def find_handler(self, request, **kwargs): # some routing logic providing a suitable HTTPMessageDelegate instance return MessageDelegate(request.connection) class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def finish(self): self.connection.write_headers( ResponseStartLine("HTTP/1.1", 200, "OK"), HTTPHeaders({"Content-Length": "2"}), b"OK") self.connection.finish() router = CustomRouter() server = HTTPServer(router) The main responsibility of a `Router` implementation is to provide a mapping from a request to a `~.httputil.HTTPMessageDelegate` instance that will handle this request. In the example above we can see that routing is possible even without instantiating an `~.web.Application`. For routing to `~.web.RequestHandler` implementations we need an `~.web.Application` instance. `~.web.Application.get_handler_delegate` provides a convenient way to create a `~.httputil.HTTPMessageDelegate` for a given request and `~.web.RequestHandler`. Here is a simple example of how we can route to `~.web.RequestHandler` subclasses by HTTP method: .. code-block:: python resources = {} class GetResource(RequestHandler): def get(self, path): if path not in resources: raise HTTPError(404) self.finish(resources[path]) class PostResource(RequestHandler): def post(self, path): resources[path] = self.request.body class HTTPMethodRouter(Router): def __init__(self, app): self.app = app def find_handler(self, request, **kwargs): handler = GetResource if request.method == "GET" else PostResource return self.app.get_handler_delegate(request, handler, path_args=[request.path]) router = HTTPMethodRouter(Application()) server = HTTPServer(router) The `ReversibleRouter` interface adds the ability to distinguish between the routes and reverse them to the original urls using a route's name and additional arguments. `~.web.Application` is itself an implementation of the `ReversibleRouter` class. `RuleRouter` and `ReversibleRuleRouter` are implementations of the `Router` and `ReversibleRouter` interfaces and can be used for creating rule-based routing configurations. Rules are instances of the `Rule` class. Each contains a `Matcher`, which provides the logic for determining whether the rule is a match for a particular request, and a target, which can be one of the following. 1) An instance of `~.httputil.HTTPServerConnectionDelegate`: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/handler"), ConnectionDelegate()), # ... more rules ]) class ConnectionDelegate(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): return MessageDelegate(request_conn) 2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/callable"), request_callable) ]) def request_callable(request): request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK") request.finish() 3) Another `Router` instance: .. code-block:: python router = RuleRouter([ Rule(PathMatches("/router.*"), CustomRouter()) ]) Of course a nested `RuleRouter` or a `~.web.Application` is allowed: .. code-block:: python router = RuleRouter([ Rule(HostMatches("example.com"), RuleRouter([ Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])), ])) ]) server = HTTPServer(router) In the example below `RuleRouter` is used to route between applications: .. code-block:: python app1 = Application([ (r"/app1/handler", Handler1), # other handlers ... ]) app2 = Application([ (r"/app2/handler", Handler2), # other handlers ... ]) router = RuleRouter([ Rule(PathMatches("/app1.*"), app1), Rule(PathMatches("/app2.*"), app2) ]) server = HTTPServer(router) For more information on application-level routing see the docs for `~.web.Application`. .. versionadded:: 4.5 """ from __future__ import absolute_import, division, print_function import re from functools import partial from tornado import httputil from tornado.httpserver import _CallableAdapter from tornado.escape import url_escape, url_unescape, utf8 from tornado.log import app_log from tornado.util import basestring_type, import_object, re_unescape, unicode_type try: import typing # noqa except ImportError: pass class Router(httputil.HTTPServerConnectionDelegate): """Abstract router interface.""" def find_handler(self, request, **kwargs): # type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate` that can serve the request. Routing implementations may pass additional kwargs to extend the routing logic. :arg httputil.HTTPServerRequest request: current HTTP request. :arg kwargs: additional keyword arguments passed by the routing implementation. :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to process the request. """ raise NotImplementedError() def start_request(self, server_conn, request_conn): return _RoutingDelegate(self, server_conn, request_conn) class ReversibleRouter(Router): """Abstract router interface for routers that can handle named routes and support reversing them to original urls. """ def reverse_url(self, name, *args): """Returns a url string for a given route name and arguments or ``None`` if no match is found. :arg str name: route name. :arg args: url parameters. :returns: a parametrized url string for a given route name (or ``None``).
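For example, with a rule registered as ``Rule(PathMatches(r"/user/(\d+)"), target, name="user")`` (a hypothetical configuration), ``reverse_url("user", 42)`` returns ``"/user/42"``.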
""" raise NotImplementedError() class _RoutingDelegate(httputil.HTTPMessageDelegate): def __init__(self, router, server_conn, request_conn): self.server_conn = server_conn self.request_conn = request_conn self.delegate = None self.router = router # type: Router def headers_received(self, start_line, headers): request = httputil.HTTPServerRequest( connection=self.request_conn, server_connection=self.server_conn, start_line=start_line, headers=headers) self.delegate = self.router.find_handler(request) return self.delegate.headers_received(start_line, headers) def data_received(self, chunk): return self.delegate.data_received(chunk) def finish(self): self.delegate.finish() def on_connection_close(self): self.delegate.on_connection_close() class RuleRouter(Router): """Rule-based router implementation.""" def __init__(self, rules=None): """Constructs a router from an ordered list of rules:: RuleRouter([ Rule(PathMatches("/handler"), Target), # ... more rules ]) You can also omit explicit `Rule` constructor and use tuples of arguments:: RuleRouter([ (PathMatches("/handler"), Target), ]) `PathMatches` is a default matcher, so the example above can be simplified:: RuleRouter([ ("/handler", Target), ]) In the examples above, ``Target`` can be a nested `Router` instance, an instance of `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument. :arg rules: a list of `Rule` instances or tuples of `Rule` constructor arguments. """ self.rules = [] # type: typing.List[Rule] if rules: self.add_rules(rules) def add_rules(self, rules): """Appends new rules to the router. :arg rules: a list of Rule instances (or tuples of arguments, which are passed to Rule constructor). """ for rule in rules: if isinstance(rule, (tuple, list)): assert len(rule) in (2, 3, 4) if isinstance(rule[0], basestring_type): rule = Rule(PathMatches(rule[0]), *rule[1:]) else: rule = Rule(*rule) self.rules.append(self.process_rule(rule)) def process_rule(self, rule): """Override this method for additional preprocessing of each rule. :arg Rule rule: a rule to be processed. :returns: the same or modified Rule instance. """ return rule def find_handler(self, request, **kwargs): for rule in self.rules: target_params = rule.matcher.match(request) if target_params is not None: if rule.target_kwargs: target_params['target_kwargs'] = rule.target_kwargs delegate = self.get_target_delegate( rule.target, request, **target_params) if delegate is not None: return delegate return None def get_target_delegate(self, target, request, **target_params): """Returns an instance of `~.httputil.HTTPMessageDelegate` for a Rule's target. This method is called by `~.find_handler` and can be extended to provide additional target types. :arg target: a Rule's target. :arg httputil.HTTPServerRequest request: current request. :arg target_params: additional parameters that can be useful for `~.httputil.HTTPMessageDelegate` creation. """ if isinstance(target, Router): return target.find_handler(request, **target_params) elif isinstance(target, httputil.HTTPServerConnectionDelegate): return target.start_request(request.server_connection, request.connection) elif callable(target): return _CallableAdapter( partial(target, **target_params), request.connection ) return None class ReversibleRuleRouter(ReversibleRouter, RuleRouter): """A rule-based router that implements ``reverse_url`` method. Each rule added to this router may have a ``name`` attribute that can be used to reconstruct an original uri. 
The actual reconstruction takes place in a rule's matcher (see `Matcher.reverse`). """ def __init__(self, rules=None): self.named_rules = {} # type: typing.Dict[str] super(ReversibleRuleRouter, self).__init__(rules) def process_rule(self, rule): rule = super(ReversibleRuleRouter, self).process_rule(rule) if rule.name: if rule.name in self.named_rules: app_log.warning( "Multiple handlers named %s; replacing previous value", rule.name) self.named_rules[rule.name] = rule return rule def reverse_url(self, name, *args): if name in self.named_rules: return self.named_rules[name].matcher.reverse(*args) for rule in self.rules: if isinstance(rule.target, ReversibleRouter): reversed_url = rule.target.reverse_url(name, *args) if reversed_url is not None: return reversed_url return None class Rule(object): """A routing rule.""" def __init__(self, matcher, target, target_kwargs=None, name=None): """Constructs a Rule instance. :arg Matcher matcher: a `Matcher` instance used for determining whether the rule should be considered a match for a specific request. :arg target: a Rule's target (typically a ``RequestHandler`` or `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`, depending on routing implementation). :arg dict target_kwargs: a dict of parameters that can be useful at the moment of target instantiation (for example, ``status_code`` for a ``RequestHandler`` subclass). They end up in ``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate` method. :arg str name: the name of the rule that can be used to find it in `ReversibleRouter.reverse_url` implementation. """ if isinstance(target, str): # import the Module and instantiate the class # Must be a fully qualified name (module.ClassName) target = import_object(target) self.matcher = matcher # type: Matcher self.target = target self.target_kwargs = target_kwargs if target_kwargs else {} self.name = name def reverse(self, *args): return self.matcher.reverse(*args) def __repr__(self): return '%s(%r, %s, kwargs=%r, name=%r)' % \ (self.__class__.__name__, self.matcher, self.target, self.target_kwargs, self.name) class Matcher(object): """Represents a matcher for request features.""" def match(self, request): """Matches current instance against the request. :arg httputil.HTTPServerRequest request: current HTTP request :returns: a dict of parameters to be passed to the target handler (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs`` can be passed for proper `~.web.RequestHandler` instantiation). An empty dict is a valid (and common) return value to indicate a match when the argument-passing features are not used. ``None`` must be returned to indicate that there is no match.""" raise NotImplementedError() def reverse(self, *args): """Reconstructs full url from matcher instance and additional arguments.""" return None class AnyMatches(Matcher): """Matches any request.""" def match(self, request): return {} class HostMatches(Matcher): """Matches requests from hosts specified by ``host_pattern`` regex.""" def __init__(self, host_pattern): if isinstance(host_pattern, basestring_type): if not host_pattern.endswith("$"): host_pattern += "$" self.host_pattern = re.compile(host_pattern) else: self.host_pattern = host_pattern def match(self, request): if self.host_pattern.match(request.host_name): return {} return None class DefaultHostMatches(Matcher): """Matches requests from host that is equal to application's default_host. Always returns no match if ``X-Real-Ip`` header is present. 
""" def __init__(self, application, host_pattern): self.application = application self.host_pattern = host_pattern def match(self, request): # Look for default host if not behind load balancer (for debugging) if "X-Real-Ip" not in request.headers: if self.host_pattern.match(self.application.default_host): return {} return None class PathMatches(Matcher): """Matches requests with paths specified by ``path_pattern`` regex.""" def __init__(self, path_pattern): if isinstance(path_pattern, basestring_type): if not path_pattern.endswith('$'): path_pattern += '$' self.regex = re.compile(path_pattern) else: self.regex = path_pattern assert len(self.regex.groupindex) in (0, self.regex.groups), \ ("groups in url regexes must either be all named or all " "positional: %r" % self.regex.pattern) self._path, self._group_count = self._find_groups() def match(self, request): match = self.regex.match(request.path) if match is None: return None if not self.regex.groups: return {} path_args, path_kwargs = [], {} # Pass matched groups to the handler. Since # match.groups() includes both named and # unnamed groups, we want to use either groups # or groupdict but not both. if self.regex.groupindex: path_kwargs = dict( (str(k), _unquote_or_none(v)) for (k, v) in match.groupdict().items()) else: path_args = [_unquote_or_none(s) for s in match.groups()] return dict(path_args=path_args, path_kwargs=path_kwargs) def reverse(self, *args): if self._path is None: raise ValueError("Cannot reverse url regex " + self.regex.pattern) assert len(args) == self._group_count, "required number of arguments " \ "not found" if not len(args): return self._path converted_args = [] for a in args: if not isinstance(a, (unicode_type, bytes)): a = str(a) converted_args.append(url_escape(utf8(a), plus=False)) return self._path % tuple(converted_args) def _find_groups(self): """Returns a tuple (reverse string, group count) for a url. For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method would return ('/%s/%s/', 2). """ pattern = self.regex.pattern if pattern.startswith('^'): pattern = pattern[1:] if pattern.endswith('$'): pattern = pattern[:-1] if self.regex.groups != pattern.count('('): # The pattern is too complicated for our simplistic matching, # so we can't support reversing it. return None, None pieces = [] for fragment in pattern.split('('): if ')' in fragment: paren_loc = fragment.index(')') if paren_loc >= 0: pieces.append('%s' + fragment[paren_loc + 1:]) else: try: unescaped_fragment = re_unescape(fragment) except ValueError as exc: # If we can't unescape part of it, we can't # reverse this url. return (None, None) pieces.append(unescaped_fragment) return ''.join(pieces), self.regex.groups class URLSpec(Rule): """Specifies mappings between URLs and handlers. .. versionchanged: 4.5 `URLSpec` is now a subclass of a `Rule` with `PathMatches` matcher and is preserved for backwards compatibility. """ def __init__(self, pattern, handler, kwargs=None, name=None): """Parameters: * ``pattern``: Regular expression to be matched. Any capturing groups in the regex will be passed in to the handler's get/post/etc methods as arguments (by keyword if named, by position if unnamed. Named and unnamed capturing groups may may not be mixed in the same rule). * ``handler``: `~.web.RequestHandler` subclass to be invoked. * ``kwargs`` (optional): A dictionary of additional arguments to be passed to the handler's constructor. * ``name`` (optional): A name for this handler. Used by `~.web.Application.reverse_url`. 
""" super(URLSpec, self).__init__(PathMatches(pattern), handler, kwargs, name) self.regex = self.matcher.regex self.handler_class = self.target self.kwargs = kwargs def __repr__(self): return '%s(%r, %s, kwargs=%r, name=%r)' % \ (self.__class__.__name__, self.regex.pattern, self.handler_class, self.kwargs, self.name) def _unquote_or_none(s): """None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use. """ if s is None: return s return url_unescape(s, encoding=None, plus=False) tornado-4.5.3/tornado/simple_httpclient.py000066400000000000000000000602201322420601000207260ustar00rootroot00000000000000#!/usr/bin/env python from __future__ import absolute_import, division, print_function from tornado.escape import utf8, _unicode from tornado import gen from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy from tornado import httputil from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters from tornado.iostream import StreamClosedError from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults from tornado.log import gen_log from tornado import stack_context from tornado.tcpclient import TCPClient from tornado.util import PY3 import base64 import collections import copy import functools import re import socket import sys from io import BytesIO if PY3: import urllib.parse as urlparse else: import urlparse try: import ssl except ImportError: # ssl is not available on Google App Engine. ssl = None try: import certifi except ImportError: certifi = None def _default_ca_certs(): if certifi is None: raise Exception("The 'certifi' package is required to use https " "in simple_httpclient") return certifi.where() class SimpleAsyncHTTPClient(AsyncHTTPClient): """Non-blocking HTTP client with no external dependencies. This class implements an HTTP 1.1 client on top of Tornado's IOStreams. Some features found in the curl-based AsyncHTTPClient are not yet supported. In particular, proxies are not supported, connections are not reused, and callers cannot select the network interface to be used. """ def initialize(self, io_loop, max_clients=10, hostname_mapping=None, max_buffer_size=104857600, resolver=None, defaults=None, max_header_size=None, max_body_size=None): """Creates a AsyncHTTPClient. Only a single AsyncHTTPClient instance exists per IOLoop in order to provide limitations on the number of pending connections. ``force_instance=True`` may be used to suppress this behavior. Note that because of this implicit reuse, unless ``force_instance`` is used, only the first call to the constructor actually uses its arguments. It is recommended to use the ``configure`` method instead of the constructor to ensure that arguments take effect. ``max_clients`` is the number of concurrent requests that can be in progress; when this limit is reached additional requests will be queued. Note that time spent waiting in this queue still counts against the ``request_timeout``. ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses. It can be used to make local DNS changes when modifying system-wide settings like ``/etc/hosts`` is not possible or desirable (e.g. in unittests). ``max_buffer_size`` (default 100MB) is the number of bytes that can be read into memory at once. ``max_body_size`` (defaults to ``max_buffer_size``) is the largest response body that the client will accept. 
Without a ``streaming_callback``, the smaller of these two limits applies; with a ``streaming_callback`` only ``max_body_size`` does. .. versionchanged:: 4.2 Added the ``max_body_size`` argument. """ super(SimpleAsyncHTTPClient, self).initialize(io_loop, defaults=defaults) self.max_clients = max_clients self.queue = collections.deque() self.active = {} self.waiting = {} self.max_buffer_size = max_buffer_size self.max_header_size = max_header_size self.max_body_size = max_body_size # TCPClient could create a Resolver for us, but we have to do it # ourselves to support hostname_mapping. if resolver: self.resolver = resolver self.own_resolver = False else: self.resolver = Resolver(io_loop=io_loop) self.own_resolver = True if hostname_mapping is not None: self.resolver = OverrideResolver(resolver=self.resolver, mapping=hostname_mapping) self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop) def close(self): super(SimpleAsyncHTTPClient, self).close() if self.own_resolver: self.resolver.close() self.tcp_client.close() def fetch_impl(self, request, callback): key = object() self.queue.append((key, request, callback)) if not len(self.active) < self.max_clients: timeout_handle = self.io_loop.add_timeout( self.io_loop.time() + min(request.connect_timeout, request.request_timeout), functools.partial(self._on_timeout, key, "in request queue")) else: timeout_handle = None self.waiting[key] = (request, callback, timeout_handle) self._process_queue() if self.queue: gen_log.debug("max_clients limit reached, request queued. " "%d active, %d queued requests." % ( len(self.active), len(self.queue))) def _process_queue(self): with stack_context.NullContext(): while self.queue and len(self.active) < self.max_clients: key, request, callback = self.queue.popleft() if key not in self.waiting: continue self._remove_timeout(key) self.active[key] = (request, callback) release_callback = functools.partial(self._release_fetch, key) self._handle_request(request, release_callback, callback) def _connection_class(self): return _HTTPConnection def _handle_request(self, request, release_callback, final_callback): self._connection_class()( self.io_loop, self, request, release_callback, final_callback, self.max_buffer_size, self.tcp_client, self.max_header_size, self.max_body_size) def _release_fetch(self, key): del self.active[key] self._process_queue() def _remove_timeout(self, key): if key in self.waiting: request, callback, timeout_handle = self.waiting[key] if timeout_handle is not None: self.io_loop.remove_timeout(timeout_handle) del self.waiting[key] def _on_timeout(self, key, info=None): """Timeout callback of request. Construct a timeout HTTPResponse when a timeout occurs. :arg object key: A simple object to mark the request. :info string key: More detailed timeout information. 
""" request, callback, timeout_handle = self.waiting[key] self.queue.remove((key, request, callback)) error_message = "Timeout {0}".format(info) if info else "Timeout" timeout_response = HTTPResponse( request, 599, error=HTTPError(599, error_message), request_time=self.io_loop.time() - request.start_time) self.io_loop.add_callback(callback, timeout_response) del self.waiting[key] class _HTTPConnection(httputil.HTTPMessageDelegate): _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"]) def __init__(self, io_loop, client, request, release_callback, final_callback, max_buffer_size, tcp_client, max_header_size, max_body_size): self.start_time = io_loop.time() self.io_loop = io_loop self.client = client self.request = request self.release_callback = release_callback self.final_callback = final_callback self.max_buffer_size = max_buffer_size self.tcp_client = tcp_client self.max_header_size = max_header_size self.max_body_size = max_body_size self.code = None self.headers = None self.chunks = [] self._decompressor = None # Timeout handle returned by IOLoop.add_timeout self._timeout = None self._sockaddr = None with stack_context.ExceptionStackContext(self._handle_exception): self.parsed = urlparse.urlsplit(_unicode(self.request.url)) if self.parsed.scheme not in ("http", "https"): raise ValueError("Unsupported url scheme: %s" % self.request.url) # urlsplit results have hostname and port results, but they # didn't support ipv6 literals until python 2.7. netloc = self.parsed.netloc if "@" in netloc: userpass, _, netloc = netloc.rpartition("@") host, port = httputil.split_host_and_port(netloc) if port is None: port = 443 if self.parsed.scheme == "https" else 80 if re.match(r'^\[.*\]$', host): # raw ipv6 addresses in urls are enclosed in brackets host = host[1:-1] self.parsed_hostname = host # save final host for _on_connect if request.allow_ipv6 is False: af = socket.AF_INET else: af = socket.AF_UNSPEC ssl_options = self._get_ssl_options(self.parsed.scheme) timeout = min(self.request.connect_timeout, self.request.request_timeout) if timeout: self._timeout = self.io_loop.add_timeout( self.start_time + timeout, stack_context.wrap(functools.partial(self._on_timeout, "while connecting"))) self.tcp_client.connect(host, port, af=af, ssl_options=ssl_options, max_buffer_size=self.max_buffer_size, callback=self._on_connect) def _get_ssl_options(self, scheme): if scheme == "https": if self.request.ssl_options is not None: return self.request.ssl_options # If we are using the defaults, don't construct a # new SSLContext. if (self.request.validate_cert and self.request.ca_certs is None and self.request.client_cert is None and self.request.client_key is None): return _client_ssl_defaults ssl_options = {} if self.request.validate_cert: ssl_options["cert_reqs"] = ssl.CERT_REQUIRED if self.request.ca_certs is not None: ssl_options["ca_certs"] = self.request.ca_certs elif not hasattr(ssl, 'create_default_context'): # When create_default_context is present, # we can omit the "ca_certs" parameter entirely, # which avoids the dependency on "certifi" for py34. ssl_options["ca_certs"] = _default_ca_certs() if self.request.client_key is not None: ssl_options["keyfile"] = self.request.client_key if self.request.client_cert is not None: ssl_options["certfile"] = self.request.client_cert # SSL interoperability is tricky. We want to disable # SSLv2 for security reasons; it wasn't disabled by default # until openssl 1.0. 
The best way to do this is to use # the SSL_OP_NO_SSLv2, but that wasn't exposed to python # until 3.2. Python 2.7 adds the ciphers argument, which # can also be used to disable SSLv2. As a last resort # on python 2.6, we set ssl_version to TLSv1. This is # more narrow than we'd like since it also breaks # compatibility with servers configured for SSLv3 only, # but nearly all servers support both SSLv3 and TLSv1: # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html if sys.version_info >= (2, 7): # In addition to disabling SSLv2, we also exclude certain # classes of insecure ciphers. ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES" else: # This is really only necessary for pre-1.0 versions # of openssl, but python 2.6 doesn't expose version # information. ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1 return ssl_options return None def _on_timeout(self, info=None): """Timeout callback of _HTTPConnection instance. Raise a timeout HTTPError when a timeout occurs. :info string key: More detailed timeout information. """ self._timeout = None error_message = "Timeout {0}".format(info) if info else "Timeout" if self.final_callback is not None: raise HTTPError(599, error_message) def _remove_timeout(self): if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None def _on_connect(self, stream): if self.final_callback is None: # final_callback is cleared if we've hit our timeout. stream.close() return self.stream = stream self.stream.set_close_callback(self.on_connection_close) self._remove_timeout() if self.final_callback is None: return if self.request.request_timeout: self._timeout = self.io_loop.add_timeout( self.start_time + self.request.request_timeout, stack_context.wrap(functools.partial(self._on_timeout, "during request"))) if (self.request.method not in self._SUPPORTED_METHODS and not self.request.allow_nonstandard_methods): raise KeyError("unknown method %s" % self.request.method) for key in ('network_interface', 'proxy_host', 'proxy_port', 'proxy_username', 'proxy_password', 'proxy_auth_mode'): if getattr(self.request, key, None): raise NotImplementedError('%s not supported' % key) if "Connection" not in self.request.headers: self.request.headers["Connection"] = "close" if "Host" not in self.request.headers: if '@' in self.parsed.netloc: self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1] else: self.request.headers["Host"] = self.parsed.netloc username, password = None, None if self.parsed.username is not None: username, password = self.parsed.username, self.parsed.password elif self.request.auth_username is not None: username = self.request.auth_username password = self.request.auth_password or '' if username is not None: if self.request.auth_mode not in (None, "basic"): raise ValueError("unsupported auth_mode %s", self.request.auth_mode) auth = utf8(username) + b":" + utf8(password) self.request.headers["Authorization"] = (b"Basic " + base64.b64encode(auth)) if self.request.user_agent: self.request.headers["User-Agent"] = self.request.user_agent if not self.request.allow_nonstandard_methods: # Some HTTP methods nearly always have bodies while others # almost never do. Fail in this case unless the user has # opted out of sanity checks with allow_nonstandard_methods. 
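# For example, a GET with a body or a POST without one is rejected
# by the check below unless allow_nonstandard_methods is set.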
body_expected = self.request.method in ("POST", "PATCH", "PUT") body_present = (self.request.body is not None or self.request.body_producer is not None) if ((body_expected and not body_present) or (body_present and not body_expected)): raise ValueError( 'Body must %sbe None for method %s (unless ' 'allow_nonstandard_methods is true)' % ('not ' if body_expected else '', self.request.method)) if self.request.expect_100_continue: self.request.headers["Expect"] = "100-continue" if self.request.body is not None: # When body_producer is used the caller is responsible for # setting Content-Length (or else chunked encoding will be used). self.request.headers["Content-Length"] = str(len( self.request.body)) if (self.request.method == "POST" and "Content-Type" not in self.request.headers): self.request.headers["Content-Type"] = "application/x-www-form-urlencoded" if self.request.decompress_response: self.request.headers["Accept-Encoding"] = "gzip" req_path = ((self.parsed.path or '/') + (('?' + self.parsed.query) if self.parsed.query else '')) self.connection = self._create_connection(stream) start_line = httputil.RequestStartLine(self.request.method, req_path, '') self.connection.write_headers(start_line, self.request.headers) if self.request.expect_100_continue: self._read_response() else: self._write_body(True) def _create_connection(self, stream): stream.set_nodelay(True) connection = HTTP1Connection( stream, True, HTTP1ConnectionParameters( no_keep_alive=True, max_header_size=self.max_header_size, max_body_size=self.max_body_size, decompress=self.request.decompress_response), self._sockaddr) return connection def _write_body(self, start_read): if self.request.body is not None: self.connection.write(self.request.body) elif self.request.body_producer is not None: fut = self.request.body_producer(self.connection.write) if fut is not None: fut = gen.convert_yielded(fut) def on_body_written(fut): fut.result() self.connection.finish() if start_read: self._read_response() self.io_loop.add_future(fut, on_body_written) return self.connection.finish() if start_read: self._read_response() def _read_response(self): # Ensure that any exception raised in read_response ends up in our # stack context. self.io_loop.add_future( self.connection.read_response(self), lambda f: f.result()) def _release(self): if self.release_callback is not None: release_callback = self.release_callback self.release_callback = None release_callback() def _run_callback(self, response): self._release() if self.final_callback is not None: final_callback = self.final_callback self.final_callback = None self.io_loop.add_callback(final_callback, response) def _handle_exception(self, typ, value, tb): if self.final_callback: self._remove_timeout() if isinstance(value, StreamClosedError): if value.real_error is None: value = HTTPError(599, "Stream closed") else: value = value.real_error self._run_callback(HTTPResponse(self.request, 599, error=value, request_time=self.io_loop.time() - self.start_time, )) if hasattr(self, "stream"): # TODO: this may cause a StreamClosedError to be raised # by the connection's Future. Should we cancel the # connection more gracefully? self.stream.close() return True else: # If our callback has already been called, we are probably # catching an exception that is not caused by us but rather # some child of our callback. Rather than drop it on the floor, # pass it along, unless it's just the stream being closed. 
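# Returning True tells the surrounding ExceptionStackContext that the
# error was handled (so a bare StreamClosedError is swallowed here);
# returning False lets any other exception propagate.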
return isinstance(value, StreamClosedError) def on_connection_close(self): if self.final_callback is not None: message = "Connection closed" if self.stream.error: raise self.stream.error try: raise HTTPError(599, message) except HTTPError: self._handle_exception(*sys.exc_info()) def headers_received(self, first_line, headers): if self.request.expect_100_continue and first_line.code == 100: self._write_body(False) return self.code = first_line.code self.reason = first_line.reason self.headers = headers if self._should_follow_redirect(): return if self.request.header_callback is not None: # Reassemble the start line. self.request.header_callback('%s %s %s\r\n' % first_line) for k, v in self.headers.get_all(): self.request.header_callback("%s: %s\r\n" % (k, v)) self.request.header_callback('\r\n') def _should_follow_redirect(self): return (self.request.follow_redirects and self.request.max_redirects > 0 and self.code in (301, 302, 303, 307, 308)) def finish(self): data = b''.join(self.chunks) self._remove_timeout() original_request = getattr(self.request, "original_request", self.request) if self._should_follow_redirect(): assert isinstance(self.request, _RequestProxy) new_request = copy.copy(self.request.request) new_request.url = urlparse.urljoin(self.request.url, self.headers["Location"]) new_request.max_redirects = self.request.max_redirects - 1 del new_request.headers["Host"] # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 # Client SHOULD make a GET request after a 303. # According to the spec, 302 should be followed by the same # method as the original request, but in practice browsers # treat 302 the same as 303, and many servers use 302 for # compatibility with pre-HTTP/1.1 user agents which don't # understand the 303 status. if self.code in (302, 303): new_request.method = "GET" new_request.body = None for h in ["Content-Length", "Content-Type", "Content-Encoding", "Transfer-Encoding"]: try: del self.request.headers[h] except KeyError: pass new_request.original_request = original_request final_callback = self.final_callback self.final_callback = None self._release() self.client.fetch(new_request, final_callback) self._on_end_request() return if self.request.streaming_callback: buffer = BytesIO() else: buffer = BytesIO(data) # TODO: don't require one big string? response = HTTPResponse(original_request, self.code, reason=getattr(self, 'reason', None), headers=self.headers, request_time=self.io_loop.time() - self.start_time, buffer=buffer, effective_url=self.request.url) self._run_callback(response) self._on_end_request() def _on_end_request(self): self.stream.close() def data_received(self, chunk): if self._should_follow_redirect(): # We're going to follow a redirect so just discard the body. 
return if self.request.streaming_callback is not None: self.request.streaming_callback(chunk) else: self.chunks.append(chunk) if __name__ == "__main__": AsyncHTTPClient.configure(SimpleAsyncHTTPClient) main() tornado-4.5.3/tornado/speedups.c000066400000000000000000000020661322420601000166250ustar00rootroot00000000000000#define PY_SSIZE_T_CLEAN #include static PyObject* websocket_mask(PyObject* self, PyObject* args) { const char* mask; Py_ssize_t mask_len; const char* data; Py_ssize_t data_len; Py_ssize_t i; PyObject* result; char* buf; if (!PyArg_ParseTuple(args, "s#s#", &mask, &mask_len, &data, &data_len)) { return NULL; } result = PyBytes_FromStringAndSize(NULL, data_len); if (!result) { return NULL; } buf = PyBytes_AsString(result); for (i = 0; i < data_len; i++) { buf[i] = data[i] ^ mask[i % 4]; } return result; } static PyMethodDef methods[] = { {"websocket_mask", websocket_mask, METH_VARARGS, ""}, {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef speedupsmodule = { PyModuleDef_HEAD_INIT, "speedups", NULL, -1, methods }; PyMODINIT_FUNC PyInit_speedups(void) { return PyModule_Create(&speedupsmodule); } #else // Python 2.x PyMODINIT_FUNC initspeedups(void) { Py_InitModule("tornado.speedups", methods); } #endif tornado-4.5.3/tornado/speedups.pyi000066400000000000000000000000731322420601000172000ustar00rootroot00000000000000def websocket_mask(mask: bytes, data: bytes) -> bytes: ... tornado-4.5.3/tornado/stack_context.py000066400000000000000000000315501322420601000200540ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2010 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """`StackContext` allows applications to maintain threadlocal-like state that follows execution as it moves to other execution contexts. The motivating examples are to eliminate the need for explicit ``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to allow some additional context to be kept for logging. This is slightly magic, but it's an extension of the idea that an exception handler is a kind of stack-local state and when that stack is suspended and resumed in a new context that state needs to be preserved. `StackContext` shifts the burden of restoring that state from each call site (e.g. wrapping each `.AsyncHTTPClient` callback in ``async_callback``) to the mechanisms that transfer control from one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`, thread pools, etc). Example usage:: @contextlib.contextmanager def die_on_error(): try: yield except Exception: logging.error("exception in asynchronous operation",exc_info=True) sys.exit(1) with StackContext(die_on_error): # Any exception thrown here *or in callback and its descendants* # will cause the process to exit instead of spinning endlessly # in the ioloop. http_client.fetch(url, callback) ioloop.start() Most applications shouldn't have to work with `StackContext` directly. 
Here are a few rules of thumb for when it's necessary: * If you're writing an asynchronous library that doesn't rely on a stack_context-aware library like `tornado.ioloop` or `tornado.iostream` (for example, if you're writing a thread pool), use `.stack_context.wrap()` before any asynchronous operations to capture the stack context from where the operation was started. * If you're writing an asynchronous library that has some shared resources (such as a connection pool), create those shared resources within a ``with stack_context.NullContext():`` block. This will prevent ``StackContexts`` from leaking from one request to another. * If you want to write something like an exception handler that will persist across asynchronous calls, create a new `StackContext` (or `ExceptionStackContext`), and make your asynchronous calls in a ``with`` block that references your `StackContext`. """ from __future__ import absolute_import, division, print_function import sys import threading from tornado.util import raise_exc_info class StackContextInconsistentError(Exception): pass class _State(threading.local): def __init__(self): self.contexts = (tuple(), None) _state = _State() class StackContext(object): """Establishes the given context as a StackContext that will be transferred. Note that the parameter is a callable that returns a context manager, not the context itself. That is, where for a non-transferable context manager you would say:: with my_context(): StackContext takes the function itself rather than its result:: with StackContext(my_context): The result of ``with StackContext() as cb:`` is a deactivation callback. Run this callback when the StackContext is no longer needed to ensure that it is not propagated any further (note that deactivating a context does not affect any instances of that context that are currently pending). This is an advanced feature and not necessary in most applications. """ def __init__(self, context_factory): self.context_factory = context_factory self.contexts = [] self.active = True def _deactivate(self): self.active = False # StackContext protocol def enter(self): context = self.context_factory() self.contexts.append(context) context.__enter__() def exit(self, type, value, traceback): context = self.contexts.pop() context.__exit__(type, value, traceback) # Note that some of this code is duplicated in ExceptionStackContext # below. ExceptionStackContext is more common and doesn't need # the full generality of this class. def __enter__(self): self.old_contexts = _state.contexts self.new_contexts = (self.old_contexts[0] + (self,), self) _state.contexts = self.new_contexts try: self.enter() except: _state.contexts = self.old_contexts raise return self._deactivate def __exit__(self, type, value, traceback): try: self.exit(type, value, traceback) finally: final_contexts = _state.contexts _state.contexts = self.old_contexts # Generator coroutines and with-statements with non-local # effects interact badly. Check here for signs of # the stack getting out of sync. # Note that this check comes after restoring _state.context # so that if it fails things are left in a (relatively) # consistent state. if final_contexts is not self.new_contexts: raise StackContextInconsistentError( 'stack_context inconsistency (may be caused by yield ' 'within a "with StackContext" block)') # Break up a reference to itself to allow for faster GC on CPython. self.new_contexts = None class ExceptionStackContext(object): """Specialization of StackContext for exception handling. 
The supplied ``exception_handler`` function will be called in the event of an uncaught exception in this context. The semantics are similar to a try/finally clause, and intended use cases are to log an error, close a socket, or similar cleanup actions. The ``exc_info`` triple ``(type, value, traceback)`` will be passed to the exception_handler function. If the exception handler returns true, the exception will be consumed and will not be propagated to other exception handlers. """ def __init__(self, exception_handler): self.exception_handler = exception_handler self.active = True def _deactivate(self): self.active = False def exit(self, type, value, traceback): if type is not None: return self.exception_handler(type, value, traceback) def __enter__(self): self.old_contexts = _state.contexts self.new_contexts = (self.old_contexts[0], self) _state.contexts = self.new_contexts return self._deactivate def __exit__(self, type, value, traceback): try: if type is not None: return self.exception_handler(type, value, traceback) finally: final_contexts = _state.contexts _state.contexts = self.old_contexts if final_contexts is not self.new_contexts: raise StackContextInconsistentError( 'stack_context inconsistency (may be caused by yield ' 'within a "with StackContext" block)') # Break up a reference to itself to allow for faster GC on CPython. self.new_contexts = None class NullContext(object): """Resets the `StackContext`. Useful when creating a shared resource on demand (e.g. an `.AsyncHTTPClient`) where the stack that caused the creating is not relevant to future operations. """ def __enter__(self): self.old_contexts = _state.contexts _state.contexts = (tuple(), None) def __exit__(self, type, value, traceback): _state.contexts = self.old_contexts def _remove_deactivated(contexts): """Remove deactivated handlers from the chain""" # Clean ctx handlers stack_contexts = tuple([h for h in contexts[0] if h.active]) # Find new head head = contexts[1] while head is not None and not head.active: head = head.old_contexts[1] # Process chain ctx = head while ctx is not None: parent = ctx.old_contexts[1] while parent is not None: if parent.active: break ctx.old_contexts = parent.old_contexts parent = parent.old_contexts[1] ctx = parent return (stack_contexts, head) def wrap(fn): """Returns a callable object that will restore the current `StackContext` when executed. Use this whenever saving a callback to be executed later in a different execution context (either in a different thread or asynchronously in the same thread). """ # Check if function is already wrapped if fn is None or hasattr(fn, '_wrapped'): return fn # Capture current stack head # TODO: Any other better way to store contexts and update them in wrapped function? cap_contexts = [_state.contexts] if not cap_contexts[0][0] and not cap_contexts[0][1]: # Fast path when there are no active contexts. def null_wrapper(*args, **kwargs): try: current_state = _state.contexts _state.contexts = cap_contexts[0] return fn(*args, **kwargs) finally: _state.contexts = current_state null_wrapper._wrapped = True return null_wrapper def wrapped(*args, **kwargs): ret = None try: # Capture old state current_state = _state.contexts # Remove deactivated items cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0]) # Force new state _state.contexts = contexts # Current exception exc = (None, None, None) top = None # Apply stack contexts last_ctx = 0 stack = contexts[0] # Apply state for n in stack: try: n.enter() last_ctx += 1 except: # Exception happened. 
Record exception info and store top-most handler exc = sys.exc_info() top = n.old_contexts[1] # Execute callback if no exception happened while restoring state if top is None: try: ret = fn(*args, **kwargs) except: exc = sys.exc_info() top = contexts[1] # If there was exception, try to handle it by going through the exception chain if top is not None: exc = _handle_exception(top, exc) else: # Otherwise take shorter path and run stack contexts in reverse order while last_ctx > 0: last_ctx -= 1 c = stack[last_ctx] try: c.exit(*exc) except: exc = sys.exc_info() top = c.old_contexts[1] break else: top = None # If if exception happened while unrolling, take longer exception handler path if top is not None: exc = _handle_exception(top, exc) # If exception was not handled, raise it if exc != (None, None, None): raise_exc_info(exc) finally: _state.contexts = current_state return ret wrapped._wrapped = True return wrapped def _handle_exception(tail, exc): while tail is not None: try: if tail.exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() tail = tail.old_contexts[1] return exc def run_with_stack_context(context, func): """Run a coroutine ``func`` in the given `StackContext`. It is not safe to have a ``yield`` statement within a ``with StackContext`` block, so it is difficult to use stack context with `.gen.coroutine`. This helper function runs the function in the correct context while keeping the ``yield`` and ``with`` statements syntactically separate. Example:: @gen.coroutine def incorrect(): with StackContext(ctx): # ERROR: this will raise StackContextInconsistentError yield other_coroutine() @gen.coroutine def correct(): yield run_with_stack_context(StackContext(ctx), other_coroutine) .. versionadded:: 3.1 """ with context: return func() tornado-4.5.3/tornado/tcpclient.py000066400000000000000000000206731322420601000171740ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2014 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking TCP connection factory. """ from __future__ import absolute_import, division, print_function import functools import socket from tornado.concurrent import Future from tornado.ioloop import IOLoop from tornado.iostream import IOStream from tornado import gen from tornado.netutil import Resolver from tornado.platform.auto import set_close_exec _INITIAL_CONNECT_TIMEOUT = 0.3 class _Connector(object): """A stateless implementation of the "Happy Eyeballs" algorithm. "Happy Eyeballs" is documented in RFC6555 as the recommended practice for when both IPv4 and IPv6 addresses are available. In this implementation, we partition the addresses by family, and make the first connection attempt to whichever address was returned first by ``getaddrinfo``. If that connection fails or times out, we begin a connection in parallel to the first address of the other family. If there are additional failures we retry with other addresses, keeping one connection attempt per family in flight at a time. 
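For example, `split` (below) partitions a ``getaddrinfo``-style list by the
address family of its first entry (an illustrative sketch)::

    addrinfo = [(socket.AF_INET, ("1.2.3.4", 80)),
                (socket.AF_INET6, ("::1", 80)),
                (socket.AF_INET, ("5.6.7.8", 80))]
    primary, secondary = _Connector.split(addrinfo)
    # primary:   the two AF_INET tuples (same family as the first entry)
    # secondary: the AF_INET6 tuple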
http://tools.ietf.org/html/rfc6555 """ def __init__(self, addrinfo, io_loop, connect): self.io_loop = io_loop self.connect = connect self.future = Future() self.timeout = None self.last_error = None self.remaining = len(addrinfo) self.primary_addrs, self.secondary_addrs = self.split(addrinfo) @staticmethod def split(addrinfo): """Partition the ``addrinfo`` list by address family. Returns two lists. The first list contains the first entry from ``addrinfo`` and all others with the same family, and the second list contains all other addresses (normally one list will be AF_INET and the other AF_INET6, although non-standard resolvers may return additional families). """ primary = [] secondary = [] primary_af = addrinfo[0][0] for af, addr in addrinfo: if af == primary_af: primary.append((af, addr)) else: secondary.append((af, addr)) return primary, secondary def start(self, timeout=_INITIAL_CONNECT_TIMEOUT): self.try_connect(iter(self.primary_addrs)) self.set_timout(timeout) return self.future def try_connect(self, addrs): try: af, addr = next(addrs) except StopIteration: # We've reached the end of our queue, but the other queue # might still be working. Send a final error on the future # only when both queues are finished. if self.remaining == 0 and not self.future.done(): self.future.set_exception(self.last_error or IOError("connection failed")) return future = self.connect(af, addr) future.add_done_callback(functools.partial(self.on_connect_done, addrs, af, addr)) def on_connect_done(self, addrs, af, addr, future): self.remaining -= 1 try: stream = future.result() except Exception as e: if self.future.done(): return # Error: try again (but remember what happened so we have an # error to raise in the end) self.last_error = e self.try_connect(addrs) if self.timeout is not None: # If the first attempt failed, don't wait for the # timeout to try an address from the secondary queue. self.io_loop.remove_timeout(self.timeout) self.on_timeout() return self.clear_timeout() if self.future.done(): # This is a late arrival; just drop it. stream.close() else: self.future.set_result((af, addr, stream)) def set_timout(self, timeout): self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, self.on_timeout) def on_timeout(self): self.timeout = None self.try_connect(iter(self.secondary_addrs)) def clear_timeout(self): if self.timeout is not None: self.io_loop.remove_timeout(self.timeout) class TCPClient(object): """A non-blocking TCP connection factory. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. """ def __init__(self, resolver=None, io_loop=None): self.io_loop = io_loop or IOLoop.current() if resolver is not None: self.resolver = resolver self._own_resolver = False else: self.resolver = Resolver(io_loop=io_loop) self._own_resolver = True def close(self): if self._own_resolver: self.resolver.close() @gen.coroutine def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None, max_buffer_size=None, source_ip=None, source_port=None): """Connect to the given host and port. Asynchronously returns an `.IOStream` (or `.SSLIOStream` if ``ssl_options`` is not None). Using the ``source_ip`` kwarg, one can specify the source IP address to use when establishing the connection. In case the user needs to resolve and use a specific interface, it has to be handled outside of Tornado as this depends very much on the platform. Similarly, when the user requires a certain source port, it can be specified using the ``source_port`` arg. .. 
versionchanged:: 4.5 Added the ``source_ip`` and ``source_port`` arguments. """ addrinfo = yield self.resolver.resolve(host, port, af) connector = _Connector( addrinfo, self.io_loop, functools.partial(self._create_stream, max_buffer_size, source_ip=source_ip, source_port=source_port) ) af, addr, stream = yield connector.start() # TODO: For better performance we could cache the (af, addr) # information here and re-use it on subsequent connections to # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2) if ssl_options is not None: stream = yield stream.start_tls(False, ssl_options=ssl_options, server_hostname=host) raise gen.Return(stream) def _create_stream(self, max_buffer_size, af, addr, source_ip=None, source_port=None): # Always connect in plaintext; we'll convert to ssl if necessary # after one connection has completed. source_port_bind = source_port if isinstance(source_port, int) else 0 source_ip_bind = source_ip if source_port_bind and not source_ip: # User required a specific port, but did not specify # a certain source IP, will bind to the default loopback. source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1' # Trying to use the same address family as the requested af socket: # - 127.0.0.1 for IPv4 # - ::1 for IPv6 socket_obj = socket.socket(af) set_close_exec(socket_obj.fileno()) if source_port_bind or source_ip_bind: # If the user requires binding also to a specific IP/port. try: socket_obj.bind((source_ip_bind, source_port_bind)) except socket.error: socket_obj.close() # Fail loudly if unable to use the IP/port. raise try: stream = IOStream(socket_obj, io_loop=self.io_loop, max_buffer_size=max_buffer_size) except socket.error as e: fu = Future() fu.set_exception(e) return fu else: return stream.connect(addr) tornado-4.5.3/tornado/tcpserver.py000066400000000000000000000303131322420601000172140ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2011 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A non-blocking, single-threaded TCP server.""" from __future__ import absolute_import, division, print_function import errno import os import socket from tornado import gen from tornado.log import app_log from tornado.ioloop import IOLoop from tornado.iostream import IOStream, SSLIOStream from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket from tornado import process from tornado.util import errno_from_exception try: import ssl except ImportError: # ssl is not available on Google App Engine. ssl = None class TCPServer(object): r"""A non-blocking, single-threaded TCP server. To use `TCPServer`, define a subclass which overrides the `handle_stream` method. 
For example, a simple echo server could be defined like this:: from tornado.tcpserver import TCPServer from tornado.iostream import StreamClosedError from tornado import gen class EchoServer(TCPServer): @gen.coroutine def handle_stream(self, stream, address): while True: try: data = yield stream.read_until(b"\n") yield stream.write(data) except StreamClosedError: break To make this server serve SSL traffic, send the ``ssl_options`` keyword argument with an `ssl.SSLContext` object. For compatibility with older versions of Python ``ssl_options`` may also be a dictionary of keyword arguments for the `ssl.wrap_socket` method.:: ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"), os.path.join(data_dir, "mydomain.key")) TCPServer(ssl_options=ssl_ctx) `TCPServer` initialization follows one of three patterns: 1. `listen`: simple single-process:: server = TCPServer() server.listen(8888) IOLoop.current().start() 2. `bind`/`start`: simple multi-process:: server = TCPServer() server.bind(8888) server.start(0) # Forks multiple sub-processes IOLoop.current().start() When using this interface, an `.IOLoop` must *not* be passed to the `TCPServer` constructor. `start` will always start the server on the default singleton `.IOLoop`. 3. `add_sockets`: advanced multi-process:: sockets = bind_sockets(8888) tornado.process.fork_processes(0) server = TCPServer() server.add_sockets(sockets) IOLoop.current().start() The `add_sockets` interface is more complicated, but it can be used with `tornado.process.fork_processes` to give you more flexibility in when the fork happens. `add_sockets` can also be used in single-process servers if you want to create your listening sockets in some way other than `~tornado.netutil.bind_sockets`. .. versionadded:: 3.1 The ``max_buffer_size`` argument. """ def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None, read_chunk_size=None): self.io_loop = io_loop self.ssl_options = ssl_options self._sockets = {} # fd -> socket object self._pending_sockets = [] self._started = False self._stopped = False self.max_buffer_size = max_buffer_size self.read_chunk_size = read_chunk_size # Verify the SSL options. Otherwise we don't get errors until clients # connect. This doesn't verify that the keys are legitimate, but # the SSL module doesn't do that until there is a connected socket # which seems like too much work if self.ssl_options is not None and isinstance(self.ssl_options, dict): # Only certfile is required: it can contain both keys if 'certfile' not in self.ssl_options: raise KeyError('missing key "certfile" in ssl_options') if not os.path.exists(self.ssl_options['certfile']): raise ValueError('certfile "%s" does not exist' % self.ssl_options['certfile']) if ('keyfile' in self.ssl_options and not os.path.exists(self.ssl_options['keyfile'])): raise ValueError('keyfile "%s" does not exist' % self.ssl_options['keyfile']) def listen(self, port, address=""): """Starts accepting connections on the given port. This method may be called more than once to listen on multiple ports. `listen` takes effect immediately; it is not necessary to call `TCPServer.start` afterwards. It is, however, necessary to start the `.IOLoop`. """ sockets = bind_sockets(port, address=address) self.add_sockets(sockets) def add_sockets(self, sockets): """Makes this server start accepting connections on the given sockets. The ``sockets`` parameter is a list of socket objects such as those returned by `~tornado.netutil.bind_sockets`. 
`add_sockets` is typically used in combination with that method and `tornado.process.fork_processes` to provide greater control over the initialization of a multi-process server. """ if self.io_loop is None: self.io_loop = IOLoop.current() for sock in sockets: self._sockets[sock.fileno()] = sock add_accept_handler(sock, self._handle_connection, io_loop=self.io_loop) def add_socket(self, socket): """Singular version of `add_sockets`. Takes a single socket object.""" self.add_sockets([socket]) def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128, reuse_port=False): """Binds this server to the given port on the given address. To start the server, call `start`. If you want to run this server in a single process, you can call `listen` as a shortcut to the sequence of `bind` and `start` calls. Address may be either an IP address or hostname. If it's a hostname, the server will listen on all IP addresses associated with the name. Address may be an empty string or None to listen on all available interfaces. Family may be set to either `socket.AF_INET` or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise both will be used if available. The ``backlog`` argument has the same meaning as for `socket.listen `. The ``reuse_port`` argument has the same meaning as for `.bind_sockets`. This method may be called multiple times prior to `start` to listen on multiple ports or interfaces. .. versionchanged:: 4.4 Added the ``reuse_port`` argument. """ sockets = bind_sockets(port, address=address, family=family, backlog=backlog, reuse_port=reuse_port) if self._started: self.add_sockets(sockets) else: self._pending_sockets.extend(sockets) def start(self, num_processes=1): """Starts this server in the `.IOLoop`. By default, we run the server in this process and do not fork any additional child process. If num_processes is ``None`` or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If num_processes is given and > 1, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the ``autoreload=True`` option to `tornado.web.Application` which defaults to True when ``debug=True``). When using multiple processes, no IOLoops can be created or referenced until after the call to ``TCPServer.start(n)``. """ assert not self._started self._started = True if num_processes != 1: process.fork_processes(num_processes) sockets = self._pending_sockets self._pending_sockets = [] self.add_sockets(sockets) def stop(self): """Stops listening for new connections. Requests currently in progress may still continue after the server is stopped. """ if self._stopped: return self._stopped = True for fd, sock in self._sockets.items(): assert sock.fileno() == fd self.io_loop.remove_handler(fd) sock.close() def handle_stream(self, stream, address): """Override to handle a new `.IOStream` from an incoming connection. This method may be a coroutine; if so any exceptions it raises asynchronously will be logged. Accepting of incoming connections will not be blocked by this coroutine. If this `TCPServer` is configured for SSL, ``handle_stream`` may be called before the SSL handshake has completed. Use `.SSLIOStream.wait_for_handshake` if you need to verify the client's certificate or use NPN/ALPN. .. versionchanged:: 4.2 Added the option for this method to be a coroutine. 
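A minimal coroutine implementation might look like this (an illustrative
sketch; see the echo server in the class docstring for a fuller example)::

    @gen.coroutine
    def handle_stream(self, stream, address):
        yield stream.write(b"hello\n")
        stream.close()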
""" raise NotImplementedError() def _handle_connection(self, connection, address): if self.ssl_options is not None: assert ssl, "Python 2.6+ and OpenSSL required for SSL" try: connection = ssl_wrap_socket(connection, self.ssl_options, server_side=True, do_handshake_on_connect=False) except ssl.SSLError as err: if err.args[0] == ssl.SSL_ERROR_EOF: return connection.close() else: raise except socket.error as err: # If the connection is closed immediately after it is created # (as in a port scan), we can get one of several errors. # wrap_socket makes an internal call to getpeername, # which may return either EINVAL (Mac OS X) or ENOTCONN # (Linux). If it returns ENOTCONN, this error is # silently swallowed by the ssl module, so we need to # catch another error later on (AttributeError in # SSLIOStream._do_ssl_handshake). # To test this behavior, try nmap with the -sT flag. # https://github.com/tornadoweb/tornado/pull/750 if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL): return connection.close() else: raise try: if self.ssl_options is not None: stream = SSLIOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size) else: stream = IOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size, read_chunk_size=self.read_chunk_size) future = self.handle_stream(stream, address) if future is not None: self.io_loop.add_future(gen.convert_yielded(future), lambda f: f.result()) except Exception: app_log.error("Error in connection callback", exc_info=True) tornado-4.5.3/tornado/template.py000066400000000000000000001047701322420601000170230ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A simple template system that compiles templates to Python code. Basic usage looks like:: t = template.Template("{{ myvalue }}") print(t.generate(myvalue="XXX")) `Loader` is a class that loads templates from a root directory and caches the compiled templates:: loader = template.Loader("/home/btaylor") print(loader.load("test.html").generate(myvalue="XXX")) We compile all templates to raw Python. Error-reporting is currently... uh, interesting. Syntax for the templates:: ### base.html {% block title %}Default title{% end %}
    {% for student in students %}
      {% block student %}
        <li>{{ escape(student.name) }}</li>
      {% end %}
    {% end %}
    ### bold.html
    {% extends "base.html" %}

    {% block title %}A bolder title{% end %}

    {% block student %}
      <li><span style="bold">{{ escape(student.name) }}</span></li>
    {% end %}

Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements.  ``if`` and ``for`` blocks
get translated exactly into Python, so you can do complex expressions like::

    {% for student in [p for p in people if p.student and p.age > 23] %}
      <li>{{ escape(student.name) }}</li>
  • {% end %} Translating directly to Python means you can apply functions to expressions easily, like the ``escape()`` function in the examples above. You can pass functions in to your template just like any other variable (In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`):: ### Python code def add(x, y): return x + y template.execute(add=add) ### The template {{ add(1, 2) }} We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`, `.json_encode()`, and `.squeeze()` to all templates by default. Typical applications do not create `Template` or `Loader` instances by hand, but instead use the `~.RequestHandler.render` and `~.RequestHandler.render_string` methods of `tornado.web.RequestHandler`, which load templates automatically based on the ``template_path`` `.Application` setting. Variable names beginning with ``_tt_`` are reserved by the template system and should not be used by application code. Syntax Reference ---------------- Template expressions are surrounded by double curly braces: ``{{ ... }}``. The contents may be any python expression, which will be escaped according to the current autoescape setting and inserted into the output. Other template directives use ``{% %}``. To comment out a section so that it is omitted from the output, surround it with ``{# ... #}``. These tags may be escaped as ``{{!``, ``{%!``, and ``{#!`` if you need to include a literal ``{{``, ``{%``, or ``{#`` in the output. ``{% apply *function* %}...{% end %}`` Applies a function to the output of all template code between ``apply`` and ``end``:: {% apply linkify %}{{name}} said: {{message}}{% end %} Note that as an implementation detail apply blocks are implemented as nested functions and thus may interact strangely with variables set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}`` within loops. ``{% autoescape *function* %}`` Sets the autoescape mode for the current file. This does not affect other files, even those referenced by ``{% include %}``. Note that autoescaping can also be configured globally, at the `.Application` or `Loader`.:: {% autoescape xhtml_escape %} {% autoescape None %} ``{% block *name* %}...{% end %}`` Indicates a named, replaceable block for use with ``{% extends %}``. Blocks in the parent template will be replaced with the contents of the same-named block in a child template.:: {% block title %}Default title{% end %} {% extends "base.html" %} {% block title %}My page title{% end %} ``{% comment ... %}`` A comment which will be removed from the template output. Note that there is no ``{% end %}`` tag; the comment goes from the word ``comment`` to the closing ``%}`` tag. ``{% extends *filename* %}`` Inherit from another template. Templates that use ``extends`` should contain one or more ``block`` tags to replace content from the parent template. Anything in the child template not contained in a ``block`` tag will be ignored. For an example, see the ``{% block %}`` tag. ``{% for *var* in *expr* %}...{% end %}`` Same as the python ``for`` statement. ``{% break %}`` and ``{% continue %}`` may be used inside the loop. ``{% from *x* import *y* %}`` Same as the python ``import`` statement. ``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}`` Conditional statement - outputs the first section whose condition is true. (The ``elif`` and ``else`` sections are optional) ``{% import *module* %}`` Same as the python ``import`` statement. ``{% include *filename* %}`` Includes another template file. 
The included file can see all the local variables as if it were copied directly to the point of the ``include`` directive (the ``{% autoescape %}`` directive is an exception). Alternately, ``{% module Template(filename, **kwargs) %}`` may be used to include another template with an isolated namespace. ``{% module *expr* %}`` Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is not escaped:: {% module Template("foo.html", arg=42) %} ``UIModules`` are a feature of the `tornado.web.RequestHandler` class (and specifically its ``render`` method) and will not work when the template system is used on its own in other contexts. ``{% raw *expr* %}`` Outputs the result of the given expression without autoescaping. ``{% set *x* = *y* %}`` Sets a local variable. ``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}`` Same as the python ``try`` statement. ``{% while *condition* %}... {% end %}`` Same as the python ``while`` statement. ``{% break %}`` and ``{% continue %}`` may be used inside the loop. ``{% whitespace *mode* %}`` Sets the whitespace mode for the remainder of the current file (or until the next ``{% whitespace %}`` directive). See `filter_whitespace` for available options. New in Tornado 4.3. """ from __future__ import absolute_import, division, print_function import datetime import linecache import os.path import posixpath import re import threading from tornado import escape from tornado.log import app_log from tornado.util import ObjectDict, exec_in, unicode_type, PY3 if PY3: from io import StringIO else: from cStringIO import StringIO _DEFAULT_AUTOESCAPE = "xhtml_escape" _UNSET = object() def filter_whitespace(mode, text): """Transform whitespace in ``text`` according to ``mode``. Available modes are: * ``all``: Return all whitespace unmodified. * ``single``: Collapse consecutive whitespace with a single whitespace character, preserving newlines. * ``oneline``: Collapse all runs of whitespace into a single space character, removing all newlines in the process. .. versionadded:: 4.3 """ if mode == 'all': return text elif mode == 'single': text = re.sub(r"([\t ]+)", " ", text) text = re.sub(r"(\s*\n\s*)", "\n", text) return text elif mode == 'oneline': return re.sub(r"(\s+)", " ", text) else: raise Exception("invalid whitespace mode %s" % mode) class Template(object): """A compiled template. We compile into Python from the given template_string. You can generate the template from variables with generate(). """ # note that the constructor's signature is not extracted with # autodoc because _UNSET looks like garbage. When changing # this signature update website/sphinx/template.rst too. def __init__(self, template_string, name="", loader=None, compress_whitespace=_UNSET, autoescape=_UNSET, whitespace=None): """Construct a Template. :arg str template_string: the contents of the template file. :arg str name: the filename from which the template was loaded (used for error message). :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template, used to resolve ``{% include %}`` and ``{% extend %}`` directives. :arg bool compress_whitespace: Deprecated since Tornado 4.3. Equivalent to ``whitespace="single"`` if true and ``whitespace="all"`` if false. :arg str autoescape: The name of a function in the template namespace, or ``None`` to disable escaping by default. :arg str whitespace: A string specifying treatment of whitespace; see `filter_whitespace` for options. .. 
versionchanged:: 4.3 Added ``whitespace`` parameter; deprecated ``compress_whitespace``. """ self.name = escape.native_str(name) if compress_whitespace is not _UNSET: # Convert deprecated compress_whitespace (bool) to whitespace (str). if whitespace is not None: raise Exception("cannot set both whitespace and compress_whitespace") whitespace = "single" if compress_whitespace else "all" if whitespace is None: if loader and loader.whitespace: whitespace = loader.whitespace else: # Whitespace defaults by filename. if name.endswith(".html") or name.endswith(".js"): whitespace = "single" else: whitespace = "all" # Validate the whitespace setting. filter_whitespace(whitespace, '') if autoescape is not _UNSET: self.autoescape = autoescape elif loader: self.autoescape = loader.autoescape else: self.autoescape = _DEFAULT_AUTOESCAPE self.namespace = loader.namespace if loader else {} reader = _TemplateReader(name, escape.native_str(template_string), whitespace) self.file = _File(self, _parse(reader, self)) self.code = self._generate_python(loader) self.loader = loader try: # Under python2.5, the fake filename used here must match # the module name used in __name__ below. # The dont_inherit flag prevents template.py's future imports # from being applied to the generated code. self.compiled = compile( escape.to_unicode(self.code), "%s.generated.py" % self.name.replace('.', '_'), "exec", dont_inherit=True) except Exception: formatted_code = _format_code(self.code).rstrip() app_log.error("%s code:\n%s", self.name, formatted_code) raise def generate(self, **kwargs): """Generate this template with the given arguments.""" namespace = { "escape": escape.xhtml_escape, "xhtml_escape": escape.xhtml_escape, "url_escape": escape.url_escape, "json_encode": escape.json_encode, "squeeze": escape.squeeze, "linkify": escape.linkify, "datetime": datetime, "_tt_utf8": escape.utf8, # for internal use "_tt_string_types": (unicode_type, bytes), # __name__ and __loader__ allow the traceback mechanism to find # the generated source code. "__name__": self.name.replace('.', '_'), "__loader__": ObjectDict(get_source=lambda name: self.code), } namespace.update(self.namespace) namespace.update(kwargs) exec_in(self.compiled, namespace) execute = namespace["_tt_execute"] # Clear the traceback module's cache of source data now that # we've generated a new template (mainly for this module's # unittests, where different tests reuse the same name). linecache.clearcache() return execute() def _generate_python(self, loader): buffer = StringIO() try: # named_blocks maps from names to _NamedBlock objects named_blocks = {} ancestors = self._get_ancestors(loader) ancestors.reverse() for ancestor in ancestors: ancestor.find_named_blocks(loader, named_blocks) writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template) ancestors[0].generate(writer) return buffer.getvalue() finally: buffer.close() def _get_ancestors(self, loader): ancestors = [self.file] for chunk in self.file.body.chunks: if isinstance(chunk, _ExtendsBlock): if not loader: raise ParseError("{% extends %} block found, but no " "template loader") template = loader.load(chunk.name, self.name) ancestors.extend(template._get_ancestors(loader)) return ancestors class BaseLoader(object): """Base class for template loaders. You must use a template loader to use template constructs like ``{% extends %}`` and ``{% include %}``. The loader caches all templates after they are loaded the first time. 
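For example, with the `DictLoader` subclass (an illustrative sketch)::

    loader = DictLoader({
        "base.html": "{% block x %}base{% end %}",
        "page.html": "{% extends 'base.html' %}{% block x %}page{% end %}",
    })
    loader.load("page.html").generate()  # -> b'page'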
""" def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None, whitespace=None): """Construct a template loader. :arg str autoescape: The name of a function in the template namespace, such as "xhtml_escape", or ``None`` to disable autoescaping by default. :arg dict namespace: A dictionary to be added to the default template namespace, or ``None``. :arg str whitespace: A string specifying default behavior for whitespace in templates; see `filter_whitespace` for options. Default is "single" for files ending in ".html" and ".js" and "all" for other files. .. versionchanged:: 4.3 Added ``whitespace`` parameter. """ self.autoescape = autoescape self.namespace = namespace or {} self.whitespace = whitespace self.templates = {} # self.lock protects self.templates. It's a reentrant lock # because templates may load other templates via `include` or # `extends`. Note that thanks to the GIL this code would be safe # even without the lock, but could lead to wasted work as multiple # threads tried to compile the same template simultaneously. self.lock = threading.RLock() def reset(self): """Resets the cache of compiled templates.""" with self.lock: self.templates = {} def resolve_path(self, name, parent_path=None): """Converts a possibly-relative path to absolute (used internally).""" raise NotImplementedError() def load(self, name, parent_path=None): """Loads a template.""" name = self.resolve_path(name, parent_path=parent_path) with self.lock: if name not in self.templates: self.templates[name] = self._create_template(name) return self.templates[name] def _create_template(self, name): raise NotImplementedError() class Loader(BaseLoader): """A template loader that loads from a single root directory. """ def __init__(self, root_directory, **kwargs): super(Loader, self).__init__(**kwargs) self.root = os.path.abspath(root_directory) def resolve_path(self, name, parent_path=None): if parent_path and not parent_path.startswith("<") and \ not parent_path.startswith("/") and \ not name.startswith("/"): current_path = os.path.join(self.root, parent_path) file_dir = os.path.dirname(os.path.abspath(current_path)) relative_path = os.path.abspath(os.path.join(file_dir, name)) if relative_path.startswith(self.root): name = relative_path[len(self.root) + 1:] return name def _create_template(self, name): path = os.path.join(self.root, name) with open(path, "rb") as f: template = Template(f.read(), name=name, loader=self) return template class DictLoader(BaseLoader): """A template loader that loads from a dictionary.""" def __init__(self, dict, **kwargs): super(DictLoader, self).__init__(**kwargs) self.dict = dict def resolve_path(self, name, parent_path=None): if parent_path and not parent_path.startswith("<") and \ not parent_path.startswith("/") and \ not name.startswith("/"): file_dir = posixpath.dirname(parent_path) name = posixpath.normpath(posixpath.join(file_dir, name)) return name def _create_template(self, name): return Template(self.dict[name], name=name, loader=self) class _Node(object): def each_child(self): return () def generate(self, writer): raise NotImplementedError() def find_named_blocks(self, loader, named_blocks): for child in self.each_child(): child.find_named_blocks(loader, named_blocks) class _File(_Node): def __init__(self, template, body): self.template = template self.body = body self.line = 0 def generate(self, writer): writer.write_line("def _tt_execute():", self.line) with writer.indent(): writer.write_line("_tt_buffer = []", self.line) writer.write_line("_tt_append = 
_tt_buffer.append", self.line) self.body.generate(writer) writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) def each_child(self): return (self.body,) class _ChunkList(_Node): def __init__(self, chunks): self.chunks = chunks def generate(self, writer): for chunk in self.chunks: chunk.generate(writer) def each_child(self): return self.chunks class _NamedBlock(_Node): def __init__(self, name, body, template, line): self.name = name self.body = body self.template = template self.line = line def each_child(self): return (self.body,) def generate(self, writer): block = writer.named_blocks[self.name] with writer.include(block.template, self.line): block.body.generate(writer) def find_named_blocks(self, loader, named_blocks): named_blocks[self.name] = self _Node.find_named_blocks(self, loader, named_blocks) class _ExtendsBlock(_Node): def __init__(self, name): self.name = name class _IncludeBlock(_Node): def __init__(self, name, reader, line): self.name = name self.template_name = reader.name self.line = line def find_named_blocks(self, loader, named_blocks): included = loader.load(self.name, self.template_name) included.file.find_named_blocks(loader, named_blocks) def generate(self, writer): included = writer.loader.load(self.name, self.template_name) with writer.include(included, self.line): included.file.body.generate(writer) class _ApplyBlock(_Node): def __init__(self, method, line, body=None): self.method = method self.line = line self.body = body def each_child(self): return (self.body,) def generate(self, writer): method_name = "_tt_apply%d" % writer.apply_counter writer.apply_counter += 1 writer.write_line("def %s():" % method_name, self.line) with writer.indent(): writer.write_line("_tt_buffer = []", self.line) writer.write_line("_tt_append = _tt_buffer.append", self.line) self.body.generate(writer) writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % ( self.method, method_name), self.line) class _ControlBlock(_Node): def __init__(self, statement, line, body=None): self.statement = statement self.line = line self.body = body def each_child(self): return (self.body,) def generate(self, writer): writer.write_line("%s:" % self.statement, self.line) with writer.indent(): self.body.generate(writer) # Just in case the body was empty writer.write_line("pass", self.line) class _IntermediateControlBlock(_Node): def __init__(self, statement, line): self.statement = statement self.line = line def generate(self, writer): # In case the previous block was empty writer.write_line("pass", self.line) writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1) class _Statement(_Node): def __init__(self, statement, line): self.statement = statement self.line = line def generate(self, writer): writer.write_line(self.statement, self.line) class _Expression(_Node): def __init__(self, expression, line, raw=False): self.expression = expression self.line = line self.raw = raw def generate(self, writer): writer.write_line("_tt_tmp = %s" % self.expression, self.line) writer.write_line("if isinstance(_tt_tmp, _tt_string_types):" " _tt_tmp = _tt_utf8(_tt_tmp)", self.line) writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line) if not self.raw and writer.current_template.autoescape is not None: # In python3 functions like xhtml_escape return unicode, # so we have to convert to utf8 again. 
writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" % writer.current_template.autoescape, self.line) writer.write_line("_tt_append(_tt_tmp)", self.line) class _Module(_Expression): def __init__(self, expression, line): super(_Module, self).__init__("_tt_modules." + expression, line, raw=True) class _Text(_Node): def __init__(self, value, line, whitespace): self.value = value self.line = line self.whitespace = whitespace def generate(self, writer): value = self.value # Compress whitespace if requested, with a crude heuristic to avoid # altering preformatted whitespace. if "
    " not in value:
                value = filter_whitespace(self.whitespace, value)
    
            if value:
                writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
    
    
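    # Illustrative sketch (hypothetical usage, not in the original source):
    # the effect of the three whitespace modes applied above via
    # filter_whitespace.
    #
    #     filter_whitespace("all", "a  b\n\n  c")      # -> 'a  b\n\n  c'
    #     filter_whitespace("single", "a  b\n\n  c")   # -> 'a b\nc'
    #     filter_whitespace("oneline", "a  b\n\n  c")  # -> 'a b c'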
    class ParseError(Exception):
        """Raised for template syntax errors.
    
        ``ParseError`` instances have ``filename`` and ``lineno`` attributes
        indicating the position of the error.
    
        .. versionchanged:: 4.3
           Added ``filename`` and ``lineno`` attributes.
        """
        def __init__(self, message, filename=None, lineno=0):
            self.message = message
            # The names "filename" and "lineno" are chosen for consistency
            # with python SyntaxError.
            self.filename = filename
            self.lineno = lineno
    
        def __str__(self):
            return '%s at %s:%d' % (self.message, self.filename, self.lineno)
    
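    # Illustrative sketch (hypothetical usage, not in the original source):
    # a malformed template reports its location through ParseError.
    #
    #     try:
    #         Template("{% if x %}no end tag", name="demo.html")
    #     except ParseError as e:
    #         print(e)  # -> Missing {% end %} block for if at demo.html:1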
    
    class _CodeWriter(object):
        def __init__(self, file, named_blocks, loader, current_template):
            self.file = file
            self.named_blocks = named_blocks
            self.loader = loader
            self.current_template = current_template
            self.apply_counter = 0
            self.include_stack = []
            self._indent = 0
    
        def indent_size(self):
            return self._indent
    
        def indent(self):
            class Indenter(object):
                def __enter__(_):
                    self._indent += 1
                    return self
    
                def __exit__(_, *args):
                    assert self._indent > 0
                    self._indent -= 1
    
            return Indenter()
    
        def include(self, template, line):
            self.include_stack.append((self.current_template, line))
            self.current_template = template
    
            class IncludeTemplate(object):
                def __enter__(_):
                    return self
    
                def __exit__(_, *args):
                    self.current_template = self.include_stack.pop()[0]
    
            return IncludeTemplate()
    
        def write_line(self, line, line_number, indent=None):
            if indent is None:
                indent = self._indent
            line_comment = '  # %s:%d' % (self.current_template.name, line_number)
            if self.include_stack:
                ancestors = ["%s:%d" % (tmpl.name, lineno)
                             for (tmpl, lineno) in self.include_stack]
                line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
            print("    " * indent + line + line_comment, file=self.file)
    
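    # Illustrative sketch (hypothetical usage, not in the original source):
    # the generated source written through _CodeWriter is kept on
    # Template.code.
    #
    #     print(Template("{{ x }}", name="t.html").code)
    #
    # prints something like:
    #
    #     def _tt_execute():  # t.html:0
    #         _tt_buffer = []  # t.html:0
    #         _tt_append = _tt_buffer.append  # t.html:0
    #         _tt_tmp = x  # t.html:1
    #         ...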
    
    class _TemplateReader(object):
        def __init__(self, name, text, whitespace):
            self.name = name
            self.text = text
            self.whitespace = whitespace
            self.line = 1
            self.pos = 0
    
        def find(self, needle, start=0, end=None):
            assert start >= 0, start
            pos = self.pos
            start += pos
            if end is None:
                index = self.text.find(needle, start)
            else:
                end += pos
                assert end >= start
                index = self.text.find(needle, start, end)
            if index != -1:
                index -= pos
            return index
    
        def consume(self, count=None):
            if count is None:
                count = len(self.text) - self.pos
            newpos = self.pos + count
            self.line += self.text.count("\n", self.pos, newpos)
            s = self.text[self.pos:newpos]
            self.pos = newpos
            return s
    
        def remaining(self):
            return len(self.text) - self.pos
    
        def __len__(self):
            return self.remaining()
    
        def __getitem__(self, key):
            if type(key) is slice:
                size = len(self)
                start, stop, step = key.indices(size)
                if start is None:
                    start = self.pos
                else:
                    start += self.pos
                if stop is not None:
                    stop += self.pos
                return self.text[slice(start, stop, step)]
            elif key < 0:
                return self.text[key]
            else:
                return self.text[self.pos + key]
    
        def __str__(self):
            return self.text[self.pos:]
    
        def raise_parse_error(self, msg):
            raise ParseError(msg, self.name, self.line)
    
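    # Illustrative sketch (hypothetical usage, not in the original source):
    # find() and consume() both operate relative to the reader's current
    # position.
    #
    #     reader = _TemplateReader("demo", "abc {{ x }}", "all")
    #     reader.find("{{")   # -> 4
    #     reader.consume(4)   # -> 'abc '
    #     reader.find("{{")   # -> 0 (offsets are relative to self.pos)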
    
    def _format_code(code):
        lines = code.splitlines()
        format = "%%%dd  %%s\n" % len(repr(len(lines) + 1))
        return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
    
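    # Illustrative sketch (hypothetical usage, not in the original source):
    # _format_code is what Template uses above to log numbered generated
    # source on a compile error.
    #
    #     _format_code("a = 1\nb = 2")  # -> '1  a = 1\n2  b = 2\n'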
    
    def _parse(reader, template, in_block=None, in_loop=None):
        body = _ChunkList([])
        while True:
            # Find next template directive
            curly = 0
            while True:
                curly = reader.find("{", curly)
                if curly == -1 or curly + 1 == reader.remaining():
                    # EOF
                    if in_block:
                        reader.raise_parse_error(
                            "Missing {%% end %%} block for %s" % in_block)
                    body.chunks.append(_Text(reader.consume(), reader.line,
                                             reader.whitespace))
                    return body
                # If the first curly brace is not the start of a special token,
                # start searching from the character after it
                if reader[curly + 1] not in ("{", "%", "#"):
                    curly += 1
                    continue
                # When there are more than 2 curlies in a row, use the
                # innermost ones.  This is useful when generating languages
                # like latex where curlies are also meaningful
                if (curly + 2 < reader.remaining() and
                        reader[curly + 1] == '{' and reader[curly + 2] == '{'):
                    curly += 1
                    continue
                break
    
            # Append any text before the special token
            if curly > 0:
                cons = reader.consume(curly)
                body.chunks.append(_Text(cons, reader.line,
                                         reader.whitespace))
    
            start_brace = reader.consume(2)
            line = reader.line
    
            # Template directives may be escaped as "{{!" or "{%!".
            # In this case output the braces and consume the "!".
            # This is especially useful in conjunction with jquery templates,
            # which also use double braces.
            if reader.remaining() and reader[0] == "!":
                reader.consume(1)
                body.chunks.append(_Text(start_brace, line,
                                         reader.whitespace))
                continue
    
            # Comment
            if start_brace == "{#":
                end = reader.find("#}")
                if end == -1:
                    reader.raise_parse_error("Missing end comment #}")
                contents = reader.consume(end).strip()
                reader.consume(2)
                continue
    
            # Expression
            if start_brace == "{{":
                end = reader.find("}}")
                if end == -1:
                    reader.raise_parse_error("Missing end expression }}")
                contents = reader.consume(end).strip()
                reader.consume(2)
                if not contents:
                    reader.raise_parse_error("Empty expression")
                body.chunks.append(_Expression(contents, line))
                continue
    
            # Block
            assert start_brace == "{%", start_brace
            end = reader.find("%}")
            if end == -1:
                reader.raise_parse_error("Missing end block %}")
            contents = reader.consume(end).strip()
            reader.consume(2)
            if not contents:
                reader.raise_parse_error("Empty block tag ({% %})")
    
            operator, space, suffix = contents.partition(" ")
            suffix = suffix.strip()
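            # (str.partition splits on the first space only, so a tag body
            # like "for x in y" yields operator "for" and suffix "x in y")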
    
            # Intermediate ("else", "elif", etc) blocks
            intermediate_blocks = {
                "else": set(["if", "for", "while", "try"]),
                "elif": set(["if"]),
                "except": set(["try"]),
                "finally": set(["try"]),
            }
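            # e.g. in "{% try %} ... {% except %} ... {% end %}", the
            # "except" tag is only legal while in_block == "try".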
            allowed_parents = intermediate_blocks.get(operator)
            if allowed_parents is not None:
                if not in_block:
                    reader.raise_parse_error("%s outside %s block" %
                                             (operator, allowed_parents))
                if in_block not in allowed_parents:
                    reader.raise_parse_error(
                        "%s block cannot be attached to %s block" %
                        (operator, in_block))
                body.chunks.append(_IntermediateControlBlock(contents, line))
                continue
    
            # End tag
            elif operator == "end":
                if not in_block:
                    reader.raise_parse_error("Extra {% end %} block")
                return body
    
            elif operator in ("extends", "include", "set", "import", "from",
                              "comment", "autoescape", "whitespace", "raw",
                              "module"):
                if operator == "comment":
                    continue
                if operator == "extends":
                    suffix = suffix.strip('"').strip("'")
                    if not suffix:
                        reader.raise_parse_error("extends missing file path")
                    block = _ExtendsBlock(suffix)
                elif operator in ("import", "from"):
                    if not suffix:
                        reader.raise_parse_error("import missing statement")
                    block = _Statement(contents, line)
                elif operator == "include":
                    suffix = suffix.strip('"').strip("'")
                    if not suffix:
                        reader.raise_parse_error("include missing file path")
                    block = _IncludeBlock(suffix, reader, line)
                elif operator == "set":
                    if not suffix:
                        reader.raise_parse_error("set missing statement")
                    block = _Statement(suffix, line)
                elif operator == "autoescape":
                    fn = suffix.strip()
                    if fn == "None":
                        fn = None
                    template.autoescape = fn
                    continue
                elif operator == "whitespace":
                    mode = suffix.strip()
                    # Validate the selected mode
                    filter_whitespace(mode, '')
                    reader.whitespace = mode
                    continue
                elif operator == "raw":
                    block = _Expression(suffix, line, raw=True)
                elif operator == "module":
                    block = _Module(suffix, line)
                body.chunks.append(block)
                continue
    
            elif operator in ("apply", "block", "try", "if", "for", "while"):
                # parse inner body recursively
                if operator in ("for", "while"):
                    block_body = _parse(reader, template, operator, operator)
                elif operator == "apply":
                    # apply creates a nested function so syntactically it's not
                    # in the loop.
                    block_body = _parse(reader, template, operator, None)
                else:
                    block_body = _parse(reader, template, operator, in_loop)
    
                if operator == "apply":
                    if not suffix:
                        reader.raise_parse_error("apply missing method name")
                    block = _ApplyBlock(suffix, line, block_body)
                elif operator == "block":
                    if not suffix:
                        reader.raise_parse_error("block missing name")
                    block = _NamedBlock(suffix, block_body, template, line)
                else:
                    block = _ControlBlock(contents, line, block_body)
                body.chunks.append(block)
                continue
    
            elif operator in ("break", "continue"):
                if not in_loop:
                    reader.raise_parse_error("%s outside %s block" %
                                             (operator, set(["for", "while"])))
                body.chunks.append(_Statement(contents, line))
                continue
    
            else:
                reader.raise_parse_error("unknown operator: %r" % operator)
    tornado-4.5.3/tornado/test/__init__.py
    tornado-4.5.3/tornado/test/__main__.py
    """Shim to allow python -m tornado.test.
    
    This only works in python 2.7+.
    """
    from __future__ import absolute_import, division, print_function
    
    from tornado.test.runtests import all, main
    
    # tornado.testing.main autodiscovery relies on 'all' being present in
    # the main module, so import it here even though it is not used directly.
    # The following line prevents a pyflakes warning.
    all = all
    
    main()
    tornado-4.5.3/tornado/test/asyncio_test.py
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    
    from __future__ import absolute_import, division, print_function
    
    from tornado import gen
    from tornado.testing import AsyncTestCase, gen_test
    from tornado.test.util import unittest, skipBefore33, skipBefore35, exec_test
    
    try:
        from tornado.platform.asyncio import asyncio
    except ImportError:
        asyncio = None
    else:
        from tornado.platform.asyncio import AsyncIOLoop, to_asyncio_future
        # This is used in dynamically-evaluated code, so silence pyflakes.
        to_asyncio_future
    
    
    @unittest.skipIf(asyncio is None, "asyncio module not present")
    class AsyncIOLoopTest(AsyncTestCase):
        def get_new_ioloop(self):
            io_loop = AsyncIOLoop()
            asyncio.set_event_loop(io_loop.asyncio_loop)
            return io_loop
    
        def test_asyncio_callback(self):
            # Basic test that the asyncio loop is set up correctly.
            asyncio.get_event_loop().call_soon(self.stop)
            self.wait()
    
        @gen_test
        def test_asyncio_future(self):
            # Test that we can yield an asyncio future from a tornado coroutine.
            # Without 'yield from', we must wrap coroutines in ensure_future,
            # which was added in Python 3.4.4 as a replacement for the
            # now-deprecated asyncio.async.
            if hasattr(asyncio, 'ensure_future'):
                ensure_future = asyncio.ensure_future
            else:
                ensure_future = asyncio.async
    
            x = yield ensure_future(
                asyncio.get_event_loop().run_in_executor(None, lambda: 42))
            self.assertEqual(x, 42)
    
        @skipBefore33
        @gen_test
        def test_asyncio_yield_from(self):
            # Test that we can use asyncio coroutines with 'yield from'
            # instead of asyncio.async(). This requires python 3.3 syntax.
            namespace = exec_test(globals(), locals(), """
            @gen.coroutine
            def f():
                event_loop = asyncio.get_event_loop()
                x = yield from event_loop.run_in_executor(None, lambda: 42)
                return x
            """)
            result = yield namespace['f']()
            self.assertEqual(result, 42)
    
        @skipBefore35
        def test_asyncio_adapter(self):
            # This test demonstrates that when using the asyncio coroutine
            # runner (i.e. run_until_complete), the to_asyncio_future
            # adapter is needed. No adapter is needed in the other direction,
            # as demonstrated by other tests in the package.
            @gen.coroutine
            def tornado_coroutine():
                yield gen.Task(self.io_loop.add_callback)
                raise gen.Return(42)
            native_coroutine_without_adapter = exec_test(globals(), locals(), """
            async def native_coroutine_without_adapter():
                return await tornado_coroutine()
            """)["native_coroutine_without_adapter"]
    
            native_coroutine_with_adapter = exec_test(globals(), locals(), """
            async def native_coroutine_with_adapter():
                return await to_asyncio_future(tornado_coroutine())
            """)["native_coroutine_with_adapter"]
    
            # Use the adapter, but two degrees from the tornado coroutine.
            native_coroutine_with_adapter2 = exec_test(globals(), locals(), """
            async def native_coroutine_with_adapter2():
                return await to_asyncio_future(native_coroutine_without_adapter())
            """)["native_coroutine_with_adapter2"]
    
            # Tornado supports native coroutines both with and without adapters
            self.assertEqual(
                self.io_loop.run_sync(native_coroutine_without_adapter),
                42)
            self.assertEqual(
                self.io_loop.run_sync(native_coroutine_with_adapter),
                42)
            self.assertEqual(
                self.io_loop.run_sync(native_coroutine_with_adapter2),
                42)
    
            # Asyncio only supports coroutines that yield asyncio-compatible
            # Futures.
            with self.assertRaises(RuntimeError):
                asyncio.get_event_loop().run_until_complete(
                    native_coroutine_without_adapter())
            self.assertEqual(
                asyncio.get_event_loop().run_until_complete(
                    native_coroutine_with_adapter()),
                42)
            self.assertEqual(
                asyncio.get_event_loop().run_until_complete(
                    native_coroutine_with_adapter2()),
                42)
    tornado-4.5.3/tornado/test/auth_test.py
    # These tests do not currently do much to verify the correct implementation
    # of the openid/oauth protocols; they just exercise the major code paths
    # and ensure that it doesn't blow up (e.g. with unicode/bytes issues in
    # python 3)
    
    
    from __future__ import absolute_import, division, print_function
    from tornado.auth import OpenIdMixin, OAuthMixin, OAuth2Mixin, TwitterMixin, AuthError, GoogleOAuth2Mixin, FacebookGraphMixin
    from tornado.concurrent import Future
    from tornado.escape import json_decode
    from tornado import gen
    from tornado.httputil import url_concat
    from tornado.log import gen_log
    from tornado.testing import AsyncHTTPTestCase, ExpectLog
    from tornado.web import RequestHandler, Application, asynchronous, HTTPError
    
    
    class OpenIdClientLoginHandler(RequestHandler, OpenIdMixin):
        def initialize(self, test):
            self._OPENID_ENDPOINT = test.get_url('/openid/server/authenticate')
    
        @asynchronous
        def get(self):
            if self.get_argument('openid.mode', None):
                self.get_authenticated_user(
                    self.on_user, http_client=self.settings['http_client'])
                return
            res = self.authenticate_redirect()
            assert isinstance(res, Future)
            assert res.done()
    
        def on_user(self, user):
            if user is None:
                raise Exception("user is None")
            self.finish(user)
    
    
    class OpenIdServerAuthenticateHandler(RequestHandler):
        def post(self):
            if self.get_argument('openid.mode') != 'check_authentication':
                raise Exception("incorrect openid.mode %r")
            self.write('is_valid:true')
    
    
    class OAuth1ClientLoginHandler(RequestHandler, OAuthMixin):
        def initialize(self, test, version):
            self._OAUTH_VERSION = version
            self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
            self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/oauth1/server/access_token')
    
        def _oauth_consumer_token(self):
            return dict(key='asdf', secret='qwer')
    
        @asynchronous
        def get(self):
            if self.get_argument('oauth_token', None):
                self.get_authenticated_user(
                    self.on_user, http_client=self.settings['http_client'])
                return
            res = self.authorize_redirect(http_client=self.settings['http_client'])
            assert isinstance(res, Future)
    
        def on_user(self, user):
            if user is None:
                raise Exception("user is None")
            self.finish(user)
    
        def _oauth_get_user(self, access_token, callback):
            if self.get_argument('fail_in_get_user', None):
                raise Exception("failing in get_user")
            if access_token != dict(key='uiop', secret='5678'):
                raise Exception("incorrect access token %r" % access_token)
            callback(dict(email='foo@example.com'))
    
    
    class OAuth1ClientLoginCoroutineHandler(OAuth1ClientLoginHandler):
        """Replaces OAuth1ClientLoginCoroutineHandler's get() with a coroutine."""
        @gen.coroutine
        def get(self):
            if self.get_argument('oauth_token', None):
                # Ensure that any exceptions are set on the returned Future,
                # not simply thrown into the surrounding StackContext.
                try:
                    yield self.get_authenticated_user()
                except Exception as e:
                    self.set_status(503)
                    self.write("got exception: %s" % e)
            else:
                yield self.authorize_redirect()
    
    
    class OAuth1ClientRequestParametersHandler(RequestHandler, OAuthMixin):
        def initialize(self, version):
            self._OAUTH_VERSION = version
    
        def _oauth_consumer_token(self):
            return dict(key='asdf', secret='qwer')
    
        def get(self):
            params = self._oauth_request_parameters(
                'http://www.example.com/api/asdf',
                dict(key='uiop', secret='5678'),
                parameters=dict(foo='bar'))
            self.write(params)
    
    
    class OAuth1ServerRequestTokenHandler(RequestHandler):
        def get(self):
            self.write('oauth_token=zxcv&oauth_token_secret=1234')
    
    
    class OAuth1ServerAccessTokenHandler(RequestHandler):
        def get(self):
            self.write('oauth_token=uiop&oauth_token_secret=5678')
    
    
    class OAuth2ClientLoginHandler(RequestHandler, OAuth2Mixin):
        def initialize(self, test):
            self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth2/server/authorize')
    
        def get(self):
            res = self.authorize_redirect()
            assert isinstance(res, Future)
            assert res.done()
    
    
    class FacebookClientLoginHandler(RequestHandler, FacebookGraphMixin):
        def initialize(self, test):
            self._OAUTH_AUTHORIZE_URL = test.get_url('/facebook/server/authorize')
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/facebook/server/access_token')
            self._FACEBOOK_BASE_URL = test.get_url('/facebook/server')
    
        @gen.coroutine
        def get(self):
            if self.get_argument("code", None):
                user = yield self.get_authenticated_user(
                    redirect_uri=self.request.full_url(),
                    client_id=self.settings["facebook_api_key"],
                    client_secret=self.settings["facebook_secret"],
                    code=self.get_argument("code"))
                self.write(user)
            else:
                yield self.authorize_redirect(
                    redirect_uri=self.request.full_url(),
                    client_id=self.settings["facebook_api_key"],
                    extra_params={"scope": "read_stream,offline_access"})
    
    
    class FacebookServerAccessTokenHandler(RequestHandler):
        def get(self):
            self.write(dict(access_token="asdf", expires_in=3600))
    
    
    class FacebookServerMeHandler(RequestHandler):
        def get(self):
            self.write('{}')
    
    
    class TwitterClientHandler(RequestHandler, TwitterMixin):
        def initialize(self, test):
            self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/twitter/server/access_token')
            self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
            self._TWITTER_BASE_URL = test.get_url('/twitter/api')
    
        def get_auth_http_client(self):
            return self.settings['http_client']
    
    
    class TwitterClientLoginHandler(TwitterClientHandler):
        @asynchronous
        def get(self):
            if self.get_argument("oauth_token", None):
                self.get_authenticated_user(self.on_user)
                return
            self.authorize_redirect()
    
        def on_user(self, user):
            if user is None:
                raise Exception("user is None")
            self.finish(user)
    
    
    class TwitterClientLoginGenEngineHandler(TwitterClientHandler):
        @asynchronous
        @gen.engine
        def get(self):
            if self.get_argument("oauth_token", None):
                user = yield self.get_authenticated_user()
                self.finish(user)
            else:
                # Old style: with @gen.engine we can ignore the Future from
                # authorize_redirect.
                self.authorize_redirect()
    
    
    class TwitterClientLoginGenCoroutineHandler(TwitterClientHandler):
        @gen.coroutine
        def get(self):
            if self.get_argument("oauth_token", None):
                user = yield self.get_authenticated_user()
                self.finish(user)
            else:
                # New style: with @gen.coroutine the result must be yielded
                # or else the request will be auto-finished too soon.
                yield self.authorize_redirect()
    
    
    class TwitterClientShowUserHandler(TwitterClientHandler):
        @asynchronous
        @gen.engine
        def get(self):
            # TODO: would be nice to go through the login flow instead of
            # cheating with a hard-coded access token.
            response = yield gen.Task(self.twitter_request,
                                      '/users/show/%s' % self.get_argument('name'),
                                      access_token=dict(key='hjkl', secret='vbnm'))
            if response is None:
                self.set_status(500)
                self.finish('error from twitter request')
            else:
                self.finish(response)
    
    
    class TwitterClientShowUserFutureHandler(TwitterClientHandler):
        @asynchronous
        @gen.engine
        def get(self):
            try:
                response = yield self.twitter_request(
                    '/users/show/%s' % self.get_argument('name'),
                    access_token=dict(key='hjkl', secret='vbnm'))
            except AuthError as e:
                self.set_status(500)
                self.finish(str(e))
                return
            assert response is not None
            self.finish(response)
    
    
    class TwitterServerAccessTokenHandler(RequestHandler):
        def get(self):
            self.write('oauth_token=hjkl&oauth_token_secret=vbnm&screen_name=foo')
    
    
    class TwitterServerShowUserHandler(RequestHandler):
        def get(self, screen_name):
            if screen_name == 'error':
                raise HTTPError(500)
            assert 'oauth_nonce' in self.request.arguments
            assert 'oauth_timestamp' in self.request.arguments
            assert 'oauth_signature' in self.request.arguments
            assert self.get_argument('oauth_consumer_key') == 'test_twitter_consumer_key'
            assert self.get_argument('oauth_signature_method') == 'HMAC-SHA1'
            assert self.get_argument('oauth_version') == '1.0'
            assert self.get_argument('oauth_token') == 'hjkl'
            self.write(dict(screen_name=screen_name, name=screen_name.capitalize()))
    
    
    class TwitterServerVerifyCredentialsHandler(RequestHandler):
        def get(self):
            assert 'oauth_nonce' in self.request.arguments
            assert 'oauth_timestamp' in self.request.arguments
            assert 'oauth_signature' in self.request.arguments
            assert self.get_argument('oauth_consumer_key') == 'test_twitter_consumer_key'
            assert self.get_argument('oauth_signature_method') == 'HMAC-SHA1'
            assert self.get_argument('oauth_version') == '1.0'
            assert self.get_argument('oauth_token') == 'hjkl'
            self.write(dict(screen_name='foo', name='Foo'))
    
    
    class AuthTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application(
                [
                    # test endpoints
                    ('/openid/client/login', OpenIdClientLoginHandler, dict(test=self)),
                    ('/oauth10/client/login', OAuth1ClientLoginHandler,
                     dict(test=self, version='1.0')),
                    ('/oauth10/client/request_params',
                     OAuth1ClientRequestParametersHandler,
                     dict(version='1.0')),
                    ('/oauth10a/client/login', OAuth1ClientLoginHandler,
                     dict(test=self, version='1.0a')),
                    ('/oauth10a/client/login_coroutine',
                     OAuth1ClientLoginCoroutineHandler,
                     dict(test=self, version='1.0a')),
                    ('/oauth10a/client/request_params',
                     OAuth1ClientRequestParametersHandler,
                     dict(version='1.0a')),
                    ('/oauth2/client/login', OAuth2ClientLoginHandler, dict(test=self)),
    
                    ('/facebook/client/login', FacebookClientLoginHandler, dict(test=self)),
    
                    ('/twitter/client/login', TwitterClientLoginHandler, dict(test=self)),
                    ('/twitter/client/login_gen_engine', TwitterClientLoginGenEngineHandler, dict(test=self)),
                    ('/twitter/client/login_gen_coroutine', TwitterClientLoginGenCoroutineHandler, dict(test=self)),
                    ('/twitter/client/show_user', TwitterClientShowUserHandler, dict(test=self)),
                    ('/twitter/client/show_user_future', TwitterClientShowUserFutureHandler, dict(test=self)),
    
                    # simulated servers
                    ('/openid/server/authenticate', OpenIdServerAuthenticateHandler),
                    ('/oauth1/server/request_token', OAuth1ServerRequestTokenHandler),
                    ('/oauth1/server/access_token', OAuth1ServerAccessTokenHandler),
    
                    ('/facebook/server/access_token', FacebookServerAccessTokenHandler),
                    ('/facebook/server/me', FacebookServerMeHandler),
                    ('/twitter/server/access_token', TwitterServerAccessTokenHandler),
                    (r'/twitter/api/users/show/(.*)\.json', TwitterServerShowUserHandler),
                    (r'/twitter/api/account/verify_credentials\.json', TwitterServerVerifyCredentialsHandler),
                ],
                http_client=self.http_client,
                twitter_consumer_key='test_twitter_consumer_key',
                twitter_consumer_secret='test_twitter_consumer_secret',
                facebook_api_key='test_facebook_api_key',
                facebook_secret='test_facebook_secret')
    
        def test_openid_redirect(self):
            response = self.fetch('/openid/client/login', follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(
                '/openid/server/authenticate?' in response.headers['Location'])
    
        def test_openid_get_user(self):
            response = self.fetch('/openid/client/login?openid.mode=blah&openid.ns.ax=http://openid.net/srv/ax/1.0&openid.ax.type.email=http://axschema.org/contact/email&openid.ax.value.email=foo@example.com')
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed["email"], "foo@example.com")
    
        def test_oauth10_redirect(self):
            response = self.fetch('/oauth10/client/login', follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(response.headers['Location'].endswith(
                '/oauth1/server/authorize?oauth_token=zxcv'))
            # the cookie is base64('zxcv')|base64('1234')
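            # (indeed, base64.b64encode(b'zxcv') == b'enhjdg==' and
            # base64.b64encode(b'1234') == b'MTIzNA==')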
            self.assertTrue(
                '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
                response.headers['Set-Cookie'])
    
        def test_oauth10_get_user(self):
            response = self.fetch(
                '/oauth10/client/login?oauth_token=zxcv',
                headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed['email'], 'foo@example.com')
            self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    
        def test_oauth10_request_parameters(self):
            response = self.fetch('/oauth10/client/request_params')
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed['oauth_consumer_key'], 'asdf')
            self.assertEqual(parsed['oauth_token'], 'uiop')
            self.assertTrue('oauth_nonce' in parsed)
            self.assertTrue('oauth_signature' in parsed)
    
        def test_oauth10a_redirect(self):
            response = self.fetch('/oauth10a/client/login', follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(response.headers['Location'].endswith(
                '/oauth1/server/authorize?oauth_token=zxcv'))
            # the cookie is base64('zxcv')|base64('1234')
            self.assertTrue(
                '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
                response.headers['Set-Cookie'])
    
        def test_oauth10a_get_user(self):
            response = self.fetch(
                '/oauth10a/client/login?oauth_token=zxcv',
                headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed['email'], 'foo@example.com')
            self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    
        def test_oauth10a_request_parameters(self):
            response = self.fetch('/oauth10a/client/request_params')
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed['oauth_consumer_key'], 'asdf')
            self.assertEqual(parsed['oauth_token'], 'uiop')
            self.assertTrue('oauth_nonce' in parsed)
            self.assertTrue('oauth_signature' in parsed)
    
        def test_oauth10a_get_user_coroutine_exception(self):
            response = self.fetch(
                '/oauth10a/client/login_coroutine?oauth_token=zxcv&fail_in_get_user=true',
                headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
            self.assertEqual(response.code, 503)
    
        def test_oauth2_redirect(self):
            response = self.fetch('/oauth2/client/login', follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue('/oauth2/server/authorize?' in response.headers['Location'])
    
        def test_facebook_login(self):
            response = self.fetch('/facebook/client/login', follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue('/facebook/server/authorize?' in response.headers['Location'])
            response = self.fetch('/facebook/client/login?code=1234', follow_redirects=False)
            self.assertEqual(response.code, 200)
            user = json_decode(response.body)
            self.assertEqual(user['access_token'], 'asdf')
            self.assertEqual(user['session_expires'], '3600')
    
        def base_twitter_redirect(self, url):
            # Same as test_oauth10a_redirect
            response = self.fetch(url, follow_redirects=False)
            self.assertEqual(response.code, 302)
            self.assertTrue(response.headers['Location'].endswith(
                '/oauth1/server/authorize?oauth_token=zxcv'))
            # the cookie is base64('zxcv')|base64('1234')
            self.assertTrue(
                '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
                response.headers['Set-Cookie'])
    
        def test_twitter_redirect(self):
            self.base_twitter_redirect('/twitter/client/login')
    
        def test_twitter_redirect_gen_engine(self):
            self.base_twitter_redirect('/twitter/client/login_gen_engine')
    
        def test_twitter_redirect_gen_coroutine(self):
            self.base_twitter_redirect('/twitter/client/login_gen_coroutine')
    
        def test_twitter_get_user(self):
            response = self.fetch(
                '/twitter/client/login?oauth_token=zxcv',
                headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
            response.rethrow()
            parsed = json_decode(response.body)
            self.assertEqual(parsed,
                             {u'access_token': {u'key': u'hjkl',
                                                u'screen_name': u'foo',
                                                u'secret': u'vbnm'},
                              u'name': u'Foo',
                              u'screen_name': u'foo',
                              u'username': u'foo'})
    
        def test_twitter_show_user(self):
            response = self.fetch('/twitter/client/show_user?name=somebody')
            response.rethrow()
            self.assertEqual(json_decode(response.body),
                             {'name': 'Somebody', 'screen_name': 'somebody'})
    
        def test_twitter_show_user_error(self):
            with ExpectLog(gen_log, 'Error response HTTP 500'):
                response = self.fetch('/twitter/client/show_user?name=error')
            self.assertEqual(response.code, 500)
            self.assertEqual(response.body, b'error from twitter request')
    
        def test_twitter_show_user_future(self):
            response = self.fetch('/twitter/client/show_user_future?name=somebody')
            response.rethrow()
            self.assertEqual(json_decode(response.body),
                             {'name': 'Somebody', 'screen_name': 'somebody'})
    
        def test_twitter_show_user_future_error(self):
            response = self.fetch('/twitter/client/show_user_future?name=error')
            self.assertEqual(response.code, 500)
            self.assertIn(b'Error response HTTP 500', response.body)
    
    
    class GoogleLoginHandler(RequestHandler, GoogleOAuth2Mixin):
        def initialize(self, test):
            self.test = test
            self._OAUTH_REDIRECT_URI = test.get_url('/client/login')
            self._OAUTH_AUTHORIZE_URL = test.get_url('/google/oauth2/authorize')
            self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/google/oauth2/token')
    
        @gen.coroutine
        def get(self):
            code = self.get_argument('code', None)
            if code is not None:
                # retrieve the authenticated google user
                access = yield self.get_authenticated_user(self._OAUTH_REDIRECT_URI,
                                                           code)
                user = yield self.oauth2_request(
                    self.test.get_url("/google/oauth2/userinfo"),
                    access_token=access["access_token"])
                # return the user and access token as json
                user["access_token"] = access["access_token"]
                self.write(user)
            else:
                yield self.authorize_redirect(
                    redirect_uri=self._OAUTH_REDIRECT_URI,
                    client_id=self.settings['google_oauth']['key'],
                    client_secret=self.settings['google_oauth']['secret'],
                    scope=['profile', 'email'],
                    response_type='code',
                    extra_params={'prompt': 'select_account'})
    
    
    class GoogleOAuth2AuthorizeHandler(RequestHandler):
        def get(self):
            # issue a fake auth code and redirect to redirect_uri
            code = 'fake-authorization-code'
            self.redirect(url_concat(self.get_argument('redirect_uri'),
                                     dict(code=code)))
    
    
    class GoogleOAuth2TokenHandler(RequestHandler):
        def post(self):
            assert self.get_argument('code') == 'fake-authorization-code'
            # issue a fake token
            self.finish({
                'access_token': 'fake-access-token',
                'expires_in': 'never-expires'
            })
    
    
    class GoogleOAuth2UserinfoHandler(RequestHandler):
        def get(self):
            assert self.get_argument('access_token') == 'fake-access-token'
            # return a fake user
            self.finish({
                'name': 'Foo',
                'email': 'foo@example.com'
            })
    
    
    class GoogleOAuth2Test(AsyncHTTPTestCase):
        def get_app(self):
            return Application(
                [
                    # test endpoints
                    ('/client/login', GoogleLoginHandler, dict(test=self)),
    
                    # simulated google authorization server endpoints
                    ('/google/oauth2/authorize', GoogleOAuth2AuthorizeHandler),
                    ('/google/oauth2/token', GoogleOAuth2TokenHandler),
                    ('/google/oauth2/userinfo', GoogleOAuth2UserinfoHandler),
                ],
                google_oauth={
                    "key": 'fake_google_client_id',
                    "secret": 'fake_google_client_secret'
                })
    
        def test_google_login(self):
            response = self.fetch('/client/login')
            self.assertDictEqual({
                u'name': u'Foo',
                u'email': u'foo@example.com',
                u'access_token': u'fake-access-token',
            }, json_decode(response.body))
    tornado-4.5.3/tornado/test/concurrent_test.py000066400000000000000000000330741322420601000214060ustar00rootroot00000000000000#!/usr/bin/env python
    #
    # Copyright 2012 Facebook
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    from __future__ import absolute_import, division, print_function
    
    import gc
    import logging
    import re
    import socket
    import sys
    import traceback
    
    from tornado.concurrent import Future, return_future, ReturnValueIgnoredError, run_on_executor
    from tornado.escape import utf8, to_unicode
    from tornado import gen
    from tornado.iostream import IOStream
    from tornado.log import app_log
    from tornado import stack_context
    from tornado.tcpserver import TCPServer
    from tornado.testing import AsyncTestCase, ExpectLog, LogTrapTestCase, bind_unused_port, gen_test
    from tornado.test.util import unittest
    
    
    try:
        from concurrent import futures
    except ImportError:
        futures = None
    
    
    class ReturnFutureTest(AsyncTestCase):
        @return_future
        def sync_future(self, callback):
            callback(42)
    
        @return_future
        def async_future(self, callback):
            self.io_loop.add_callback(callback, 42)
    
        @return_future
        def immediate_failure(self, callback):
            1 / 0
    
        @return_future
        def delayed_failure(self, callback):
            self.io_loop.add_callback(lambda: 1 / 0)
    
        @return_future
        def return_value(self, callback):
            # Note that the result of both running the callback and returning
            # a value (or raising an exception) is unspecified; with current
            # implementations the last event prior to callback resolution wins.
            return 42
    
        @return_future
        def no_result_future(self, callback):
            callback()
    
        def test_immediate_failure(self):
            with self.assertRaises(ZeroDivisionError):
                # The caller sees the error just like a normal function.
                self.immediate_failure(callback=self.stop)
            # The callback is not run because the function failed synchronously.
            self.io_loop.add_timeout(self.io_loop.time() + 0.05, self.stop)
            result = self.wait()
            self.assertIs(result, None)
    
        def test_return_value(self):
            with self.assertRaises(ReturnValueIgnoredError):
                self.return_value(callback=self.stop)
    
        def test_callback_kw(self):
            future = self.sync_future(callback=self.stop)
            result = self.wait()
            self.assertEqual(result, 42)
            self.assertEqual(future.result(), 42)
    
        def test_callback_positional(self):
            # When the callback is passed in positionally, future_wrap shouldn't
            # add another callback in the kwargs.
            future = self.sync_future(self.stop)
            result = self.wait()
            self.assertEqual(result, 42)
            self.assertEqual(future.result(), 42)
    
        def test_no_callback(self):
            future = self.sync_future()
            self.assertEqual(future.result(), 42)
    
        def test_none_callback_kw(self):
            # explicitly pass None as callback
            future = self.sync_future(callback=None)
            self.assertEqual(future.result(), 42)
    
        def test_none_callback_pos(self):
            future = self.sync_future(None)
            self.assertEqual(future.result(), 42)
    
        def test_async_future(self):
            future = self.async_future()
            self.assertFalse(future.done())
            self.io_loop.add_future(future, self.stop)
            future2 = self.wait()
            self.assertIs(future, future2)
            self.assertEqual(future.result(), 42)
    
        @gen_test
        def test_async_future_gen(self):
            result = yield self.async_future()
            self.assertEqual(result, 42)
    
        def test_delayed_failure(self):
            future = self.delayed_failure()
            self.io_loop.add_future(future, self.stop)
            future2 = self.wait()
            self.assertIs(future, future2)
            with self.assertRaises(ZeroDivisionError):
                future.result()
    
        def test_kw_only_callback(self):
            @return_future
            def f(**kwargs):
                kwargs['callback'](42)
            future = f()
            self.assertEqual(future.result(), 42)
    
        def test_error_in_callback(self):
            self.sync_future(callback=lambda future: 1 / 0)
            # The exception gets caught by our StackContext and will be re-raised
            # when we wait.
            self.assertRaises(ZeroDivisionError, self.wait)
    
        def test_no_result_future(self):
            future = self.no_result_future(self.stop)
            result = self.wait()
            self.assertIs(result, None)
            # result of this future is undefined, but not an error
            future.result()
    
        def test_no_result_future_callback(self):
            future = self.no_result_future(callback=lambda: self.stop())
            result = self.wait()
            self.assertIs(result, None)
            future.result()
    
        @gen_test
        def test_future_traceback(self):
            @return_future
            @gen.engine
            def f(callback):
                yield gen.Task(self.io_loop.add_callback)
                try:
                    1 / 0
                except ZeroDivisionError:
                    self.expected_frame = traceback.extract_tb(
                        sys.exc_info()[2], limit=1)[0]
                    raise
            try:
                yield f()
                self.fail("didn't get expected exception")
            except ZeroDivisionError:
                tb = traceback.extract_tb(sys.exc_info()[2])
                self.assertIn(self.expected_frame, tb)
    
        @gen_test
        def test_uncaught_exception_log(self):
            @gen.coroutine
            def f():
                yield gen.moment
                1 / 0
    
            g = f()
    
            with ExpectLog(app_log,
                           "(?s)Future.* exception was never retrieved:"
                           ".*ZeroDivisionError"):
                yield gen.moment
                yield gen.moment
                del g
                gc.collect()  # for PyPy
    
    
    # The following series of classes demonstrates and tests various styles
    # of use, with and without generators and futures.
    
    
    class CapServer(TCPServer):
        def handle_stream(self, stream, address):
            logging.info("handle_stream")
            self.stream = stream
            self.stream.read_until(b"\n", self.handle_read)
    
        def handle_read(self, data):
            logging.info("handle_read")
            data = to_unicode(data)
            if data == data.upper():
                self.stream.write(b"error\talready capitalized\n")
            else:
                # data already has \n
                self.stream.write(utf8("ok\t%s" % data.upper()))
            self.stream.close()
    
    
    class CapError(Exception):
        pass
    
    
    class BaseCapClient(object):
        def __init__(self, port, io_loop):
            self.port = port
            self.io_loop = io_loop
    
        def process_response(self, data):
            status, message = re.match('(.*)\t(.*)\n', to_unicode(data)).groups()
            if status == 'ok':
                return message
            else:
                raise CapError(message)
    
    
    class ManualCapClient(BaseCapClient):
        def capitalize(self, request_data, callback=None):
            logging.info("capitalize")
            self.request_data = request_data
            self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
            self.stream.connect(('127.0.0.1', self.port),
                                callback=self.handle_connect)
            self.future = Future()
            if callback is not None:
                self.future.add_done_callback(
                    stack_context.wrap(lambda future: callback(future.result())))
            return self.future
    
        def handle_connect(self):
            logging.info("handle_connect")
            self.stream.write(utf8(self.request_data + "\n"))
            self.stream.read_until(b'\n', callback=self.handle_read)
    
        def handle_read(self, data):
            logging.info("handle_read")
            self.stream.close()
            try:
                self.future.set_result(self.process_response(data))
            except CapError as e:
                self.future.set_exception(e)
    
    
    class DecoratorCapClient(BaseCapClient):
        @return_future
        def capitalize(self, request_data, callback):
            logging.info("capitalize")
            self.request_data = request_data
            self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
            self.stream.connect(('127.0.0.1', self.port),
                                callback=self.handle_connect)
            self.callback = callback
    
        def handle_connect(self):
            logging.info("handle_connect")
            self.stream.write(utf8(self.request_data + "\n"))
            self.stream.read_until(b'\n', callback=self.handle_read)
    
        def handle_read(self, data):
            logging.info("handle_read")
            self.stream.close()
            self.callback(self.process_response(data))
    
    
    class GeneratorCapClient(BaseCapClient):
        @return_future
        @gen.engine
        def capitalize(self, request_data, callback):
            logging.info('capitalize')
            stream = IOStream(socket.socket(), io_loop=self.io_loop)
            logging.info('connecting')
            yield gen.Task(stream.connect, ('127.0.0.1', self.port))
            stream.write(utf8(request_data + '\n'))
            logging.info('reading')
            data = yield gen.Task(stream.read_until, b'\n')
            logging.info('returning')
            stream.close()
            callback(self.process_response(data))
    
    
    class ClientTestMixin(object):
        def setUp(self):
            super(ClientTestMixin, self).setUp()  # type: ignore
            self.server = CapServer(io_loop=self.io_loop)
            sock, port = bind_unused_port()
            self.server.add_sockets([sock])
            self.client = self.client_class(io_loop=self.io_loop, port=port)
    
        def tearDown(self):
            self.server.stop()
            super(ClientTestMixin, self).tearDown()  # type: ignore
    
        def test_callback(self):
            self.client.capitalize("hello", callback=self.stop)
            result = self.wait()
            self.assertEqual(result, "HELLO")
    
        def test_callback_error(self):
            self.client.capitalize("HELLO", callback=self.stop)
            self.assertRaisesRegexp(CapError, "already capitalized", self.wait)
    
        def test_future(self):
            future = self.client.capitalize("hello")
            self.io_loop.add_future(future, self.stop)
            self.wait()
            self.assertEqual(future.result(), "HELLO")
    
        def test_future_error(self):
            future = self.client.capitalize("HELLO")
            self.io_loop.add_future(future, self.stop)
            self.wait()
            self.assertRaisesRegexp(CapError, "already capitalized", future.result)
    
        def test_generator(self):
            @gen.engine
            def f():
                result = yield self.client.capitalize("hello")
                self.assertEqual(result, "HELLO")
                self.stop()
            f()
            self.wait()
    
        def test_generator_error(self):
            @gen.engine
            def f():
                with self.assertRaisesRegexp(CapError, "already capitalized"):
                    yield self.client.capitalize("HELLO")
                self.stop()
            f()
            self.wait()
    
    
    class ManualClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
        client_class = ManualCapClient
    
    
    class DecoratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
        client_class = DecoratorCapClient
    
    
    class GeneratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
        client_class = GeneratorCapClient
    
    
    @unittest.skipIf(futures is None, "concurrent.futures module not present")
    class RunOnExecutorTest(AsyncTestCase):
        @gen_test
        def test_no_calling(self):
            class Object(object):
                def __init__(self, io_loop):
                    self.io_loop = io_loop
                    self.executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor
                def f(self):
                    return 42
    
            o = Object(io_loop=self.io_loop)
            answer = yield o.f()
            self.assertEqual(answer, 42)
    
        @gen_test
        def test_call_with_no_args(self):
            class Object(object):
                def __init__(self, io_loop):
                    self.io_loop = io_loop
                    self.executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor()
                def f(self):
                    return 42
    
            o = Object(io_loop=self.io_loop)
            answer = yield o.f()
            self.assertEqual(answer, 42)
    
        @gen_test
        def test_call_with_io_loop(self):
            class Object(object):
                def __init__(self, io_loop):
                    self._io_loop = io_loop
                    self.executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor(io_loop='_io_loop')
                def f(self):
                    return 42
    
            o = Object(io_loop=self.io_loop)
            answer = yield o.f()
            self.assertEqual(answer, 42)
    
        @gen_test
        def test_call_with_executor(self):
            class Object(object):
                def __init__(self, io_loop):
                    self.io_loop = io_loop
                    self.__executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor(executor='_Object__executor')
                def f(self):
                    return 42
    
            o = Object(io_loop=self.io_loop)
            answer = yield o.f()
            self.assertEqual(answer, 42)
    
        @gen_test
        def test_call_with_both(self):
            class Object(object):
                def __init__(self, io_loop):
                    self._io_loop = io_loop
                    self.__executor = futures.thread.ThreadPoolExecutor(1)
    
                @run_on_executor(io_loop='_io_loop', executor='_Object__executor')
                def f(self):
                    return 42
    
            o = Object(io_loop=self.io_loop)
            answer = yield o.f()
            self.assertEqual(answer, 42)
    tornado-4.5.3/tornado/test/csv_translations/000077500000000000000000000000001322420601000212005ustar00rootroot00000000000000tornado-4.5.3/tornado/test/csv_translations/fr_FR.csv000066400000000000000000000000221322420601000227050ustar00rootroot00000000000000"school","école"
    tornado-4.5.3/tornado/test/curl_httpclient_test.py
    # coding: utf-8
    from __future__ import absolute_import, division, print_function
    
    from hashlib import md5
    
    from tornado.escape import utf8
    from tornado.httpclient import HTTPRequest
    from tornado.stack_context import ExceptionStackContext
    from tornado.testing import AsyncHTTPTestCase
    from tornado.test import httpclient_test
    from tornado.test.util import unittest
    from tornado.web import Application, RequestHandler
    
    
    try:
        import pycurl  # type: ignore
    except ImportError:
        pycurl = None
    
    if pycurl is not None:
        from tornado.curl_httpclient import CurlAsyncHTTPClient
    
    
    @unittest.skipIf(pycurl is None, "pycurl module not present")
    class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
        def get_http_client(self):
            client = CurlAsyncHTTPClient(io_loop=self.io_loop,
                                         defaults=dict(allow_ipv6=False))
            # make sure AsyncHTTPClient magic doesn't give us the wrong class
            self.assertTrue(isinstance(client, CurlAsyncHTTPClient))
            return client
    
    
    class DigestAuthHandler(RequestHandler):
        def get(self):
            realm = 'test'
            opaque = 'asdf'
            # Real implementations would use a random nonce.
            nonce = "1234"
            username = 'foo'
            password = 'bar'
    
            auth_header = self.request.headers.get('Authorization', None)
            if auth_header is not None:
                auth_mode, params = auth_header.split(' ', 1)
                assert auth_mode == 'Digest'
                param_dict = {}
                for pair in params.split(','):
                    k, v = pair.strip().split('=', 1)
                    if v[0] == '"' and v[-1] == '"':
                        v = v[1:-1]
                    param_dict[k] = v
                assert param_dict['realm'] == realm
                assert param_dict['opaque'] == opaque
                assert param_dict['nonce'] == nonce
                assert param_dict['username'] == username
                assert param_dict['uri'] == self.request.path
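                # RFC 2617 digest without qop: HA1 = MD5(user:realm:pass),
                # HA2 = MD5(method:uri), response = MD5(HA1:nonce:HA2).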
                h1 = md5(utf8('%s:%s:%s' % (username, realm, password))).hexdigest()
                h2 = md5(utf8('%s:%s' % (self.request.method,
                                         self.request.path))).hexdigest()
                digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()
                if digest == param_dict['response']:
                    self.write('ok')
                else:
                    self.write('fail')
            else:
                self.set_status(401)
                self.set_header('WWW-Authenticate',
                                'Digest realm="%s", nonce="%s", opaque="%s"' %
                                (realm, nonce, opaque))
    
    
    class CustomReasonHandler(RequestHandler):
        def get(self):
            self.set_status(200, "Custom reason")
    
    
    class CustomFailReasonHandler(RequestHandler):
        def get(self):
            self.set_status(400, "Custom reason")
    
    
    @unittest.skipIf(pycurl is None, "pycurl module not present")
    class CurlHTTPClientTestCase(AsyncHTTPTestCase):
        def setUp(self):
            super(CurlHTTPClientTestCase, self).setUp()
            self.http_client = self.create_client()
    
        def get_app(self):
            return Application([
                ('/digest', DigestAuthHandler),
                ('/custom_reason', CustomReasonHandler),
                ('/custom_fail_reason', CustomFailReasonHandler),
            ])
    
        def create_client(self, **kwargs):
            return CurlAsyncHTTPClient(self.io_loop, force_instance=True,
                                       defaults=dict(allow_ipv6=False),
                                       **kwargs)
    
        def test_prepare_curl_callback_stack_context(self):
            exc_info = []
    
            def error_handler(typ, value, tb):
                exc_info.append((typ, value, tb))
                self.stop()
                return True
    
            with ExceptionStackContext(error_handler):
                request = HTTPRequest(self.get_url('/'),
                                      prepare_curl_callback=lambda curl: 1 / 0)
            self.http_client.fetch(request, callback=self.stop)
            self.wait()
            self.assertEqual(1, len(exc_info))
            self.assertIs(exc_info[0][0], ZeroDivisionError)
    
        def test_digest_auth(self):
            response = self.fetch('/digest', auth_mode='digest',
                                  auth_username='foo', auth_password='bar')
            self.assertEqual(response.body, b'ok')
    
        def test_custom_reason(self):
            response = self.fetch('/custom_reason')
            self.assertEqual(response.reason, "Custom reason")
    
        def test_fail_custom_reason(self):
            response = self.fetch('/custom_fail_reason')
            self.assertEqual(str(response.error), "HTTP 400: Custom reason")
    
        def test_failed_setup(self):
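            # The non-ascii URL fails during request setup inside curl; with
            # max_clients=1, looping checks that each failure releases the
            # lone request slot instead of wedging the client.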
            self.http_client = self.create_client(max_clients=1)
            for i in range(5):
                response = self.fetch(u'/ユニコード')
                self.assertIsNot(response.error, None)
    tornado-4.5.3/tornado/test/escape_test.py000066400000000000000000000255341322420601000204660ustar00rootroot00000000000000#!/usr/bin/env python
    
    
    from __future__ import absolute_import, division, print_function
    import tornado.escape
    
    from tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode, squeeze, recursive_unicode
    from tornado.util import unicode_type
    from tornado.test.util import unittest
    
    linkify_tests = [
        # (input, linkify_kwargs, expected_output)
    
        ("hello http://world.com/!", {},
         u'hello http://world.com/!'),
    
        ("hello http://world.com/with?param=true&stuff=yes", {},
         u'hello http://world.com/with?param=true&stuff=yes'),
    
        # an opened paren followed by many chars killed Gruber's regex
        ("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
         u'http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
    
        # as did too many dots at the end
        ("http://url.com/withmany.......................................", {},
         u'http://url.com/withmany.......................................'),
    
        ("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
         u'http://url.com/withmany((((((((((((((((((((((((((((((((((a)'),
    
        # some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
        # plus a fex extras (such as multiple parentheses).
        ("http://foo.com/blah_blah", {},
         u'http://foo.com/blah_blah'),
    
        ("http://foo.com/blah_blah/", {},
         u'http://foo.com/blah_blah/'),
    
        ("(Something like http://foo.com/blah_blah)", {},
         u'(Something like http://foo.com/blah_blah)'),
    
        ("http://foo.com/blah_blah_(wikipedia)", {},
         u'http://foo.com/blah_blah_(wikipedia)'),
    
        ("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
         u'http://foo.com/blah_(blah)_(wikipedia)_blah'),
    
        ("(Something like http://foo.com/blah_blah_(wikipedia))", {},
         u'(Something like http://foo.com/blah_blah_(wikipedia))'),
    
        ("http://foo.com/blah_blah.", {},
         u'http://foo.com/blah_blah.'),
    
        ("http://foo.com/blah_blah/.", {},
         u'http://foo.com/blah_blah/.'),
    
        ("", {},
         u'<http://foo.com/blah_blah>'),
    
        ("", {},
         u'<http://foo.com/blah_blah/>'),
    
        ("http://foo.com/blah_blah,", {},
         u'http://foo.com/blah_blah,'),
    
        ("http://www.example.com/wpstyle/?p=364.", {},
         u'http://www.example.com/wpstyle/?p=364.'),
    
        ("rdar://1234",
         {"permitted_protocols": ["http", "rdar"]},
         u'rdar://1234'),
    
        ("rdar:/1234",
         {"permitted_protocols": ["rdar"]},
         u'rdar:/1234'),
    
        ("http://userid:password@example.com:8080", {},
         u'http://userid:password@example.com:8080'),
    
        ("http://userid@example.com", {},
         u'http://userid@example.com'),
    
        ("http://userid@example.com:8080", {},
         u'http://userid@example.com:8080'),
    
        ("http://userid:password@example.com", {},
         u'http://userid:password@example.com'),
    
        ("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
         {"permitted_protocols": ["http", "message"]},
         u'message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e'),
    
        (u"http://\u27a1.ws/\u4a39", {},
         u'http://\u27a1.ws/\u4a39'),
    
        ("http://example.com", {},
         u'<tag>http://example.com</tag>'),
    
        ("Just a www.example.com link.", {},
         u'Just a www.example.com link.'),
    
        ("Just a www.example.com link.",
         {"require_protocol": True},
         u'Just a www.example.com link.'),
    
        ("A http://reallylong.com/link/that/exceedsthelenglimit.html",
         {"require_protocol": True, "shorten": True},
         u'A http://reallylong.com/link...'),
    
        ("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
         {"shorten": True},
         u'A http://reallylongdomainnametha...!'),
    
        ("A file:///passwords.txt and http://web.com link", {},
         u'A file:///passwords.txt and http://web.com link'),
    
        ("A file:///passwords.txt and http://web.com link",
         {"permitted_protocols": ["file"]},
         u'A file:///passwords.txt and http://web.com link'),
    
        ("www.external-link.com",
         {"extra_params": 'rel="nofollow" class="external"'},
         u'www.external-link.com'),
    
        ("www.external-link.com and www.internal-link.com/blogs extra",
         {"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
         u'www.external-link.com and www.internal-link.com/blogs extra'),
    
        ("www.external-link.com",
         {"extra_params": lambda href: '    rel="nofollow" class="external"  '},
         u'www.external-link.com'),
    ]
    
    
    class EscapeTestCase(unittest.TestCase):
        def test_linkify(self):
            for text, kwargs, html in linkify_tests:
                linked = tornado.escape.linkify(text, **kwargs)
                self.assertEqual(linked, html)
    
        def test_xhtml_escape(self):
            tests = [
                ("", "<foo>"),
                (u"", u"<foo>"),
                (b"", b"<foo>"),
    
                ("<>&\"'", "<>&"'"),
                ("&", "&amp;"),
    
                (u"<\u00e9>", u"<\u00e9>"),
                (b"<\xc3\xa9>", b"<\xc3\xa9>"),
            ]
            for unescaped, escaped in tests:
                self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
                self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
    
        def test_xhtml_unescape_numeric(self):
            tests = [
                ('foo&#32;bar', 'foo bar'),
                ('foo&#x20;bar', 'foo bar'),
                ('foo&#X20;bar', 'foo bar'),
                ('foo&#xabc;bar', u'foo\u0abcbar'),
                ('foo&#xyz;bar', 'foo&#xyz;bar'),  # invalid encoding
                ('foo&#;bar', 'foo&#;bar'),        # invalid encoding
                ('foo&#x;bar', 'foo&#x;bar'),      # invalid encoding
            ]
            for escaped, unescaped in tests:
                self.assertEqual(unescaped, xhtml_unescape(escaped))
    
        def test_url_escape_unicode(self):
            tests = [
                # byte strings are passed through as-is
                (u'\u00e9'.encode('utf8'), '%C3%A9'),
                (u'\u00e9'.encode('latin1'), '%E9'),
    
                # unicode strings become utf8
                (u'\u00e9', '%C3%A9'),
            ]
            for unescaped, escaped in tests:
                self.assertEqual(url_escape(unescaped), escaped)
    
        def test_url_unescape_unicode(self):
            tests = [
                ('%C3%A9', u'\u00e9', 'utf8'),
                ('%C3%A9', u'\u00c3\u00a9', 'latin1'),
                ('%C3%A9', utf8(u'\u00e9'), None),
            ]
            for escaped, unescaped, encoding in tests:
                # input strings to url_unescape should only contain ascii
                # characters, but make sure the function accepts both byte
                # and unicode strings.
                self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
                self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
    
        def test_url_escape_quote_plus(self):
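            # By default url_escape encodes spaces as '+' (and a literal '+'
            # as %2B); plus=False switches to %20-style escaping instead.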
            unescaped = '+ #%'
            plus_escaped = '%2B+%23%25'
            escaped = '%2B%20%23%25'
            self.assertEqual(url_escape(unescaped), plus_escaped)
            self.assertEqual(url_escape(unescaped, plus=False), escaped)
            self.assertEqual(url_unescape(plus_escaped), unescaped)
            self.assertEqual(url_unescape(escaped, plus=False), unescaped)
            self.assertEqual(url_unescape(plus_escaped, encoding=None),
                             utf8(unescaped))
            self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
                             utf8(unescaped))
    
        def test_escape_return_types(self):
            # On python2 the escape methods should generally return the same
            # type as their argument
            self.assertEqual(type(xhtml_escape("foo")), str)
            self.assertEqual(type(xhtml_escape(u"foo")), unicode_type)
    
        def test_json_decode(self):
            # json_decode accepts both bytes and unicode, but strings it returns
            # are always unicode.
            self.assertEqual(json_decode(b'"foo"'), u"foo")
            self.assertEqual(json_decode(u'"foo"'), u"foo")
    
            # Non-ascii bytes are interpreted as utf8
            self.assertEqual(json_decode(utf8(u'"\u00e9"')), u"\u00e9")
    
        def test_json_encode(self):
            # json deals with strings, not bytes.  On python 2 byte strings will
            # convert automatically if they are utf8; on python 3 byte strings
            # are not allowed.
            self.assertEqual(json_decode(json_encode(u"\u00e9")), u"\u00e9")
            if bytes is str:
                self.assertEqual(json_decode(json_encode(utf8(u"\u00e9"))), u"\u00e9")
                self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
    
        def test_squeeze(self):
            self.assertEqual(squeeze(u'sequences     of    whitespace   chars'), u'sequences of whitespace chars')
    
        def test_recursive_unicode(self):
            tests = {
                'dict': {b"foo": b"bar"},
                'list': [b"foo", b"bar"],
                'tuple': (b"foo", b"bar"),
                'bytes': b"foo"
            }
            self.assertEqual(recursive_unicode(tests['dict']), {u"foo": u"bar"})
            self.assertEqual(recursive_unicode(tests['list']), [u"foo", u"bar"])
            self.assertEqual(recursive_unicode(tests['tuple']), (u"foo", u"bar"))
            self.assertEqual(recursive_unicode(tests['bytes']), u"foo")
    tornado-4.5.3/tornado/test/gen_test.py000066400000000000000000001344001322420601000177700ustar00rootroot00000000000000from __future__ import absolute_import, division, print_function
    
    import gc
    import contextlib
    import datetime
    import functools
    import sys
    import textwrap
    import time
    import weakref
    
    from tornado.concurrent import return_future, Future
    from tornado.escape import url_escape
    from tornado.httpclient import AsyncHTTPClient
    from tornado.ioloop import IOLoop
    from tornado.log import app_log
    from tornado import stack_context
    from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
    from tornado.test.util import unittest, skipOnTravis, skipBefore33, skipBefore35, skipNotCPython, exec_test
    from tornado.web import Application, RequestHandler, asynchronous, HTTPError
    
    from tornado import gen
    
    try:
        from concurrent import futures
    except ImportError:
        futures = None
    
    
    class GenEngineTest(AsyncTestCase):
        def setUp(self):
            super(GenEngineTest, self).setUp()
            self.named_contexts = []
    
        def named_context(self, name):
            @contextlib.contextmanager
            def context():
                self.named_contexts.append(name)
                try:
                    yield
                finally:
                    self.assertEqual(self.named_contexts.pop(), name)
            return context
    
        def run_gen(self, f):
            f()
            return self.wait()
    
        def delay_callback(self, iterations, callback, arg):
            """Runs callback(arg) after a number of IOLoop iterations."""
            if iterations == 0:
                callback(arg)
            else:
                self.io_loop.add_callback(functools.partial(
                    self.delay_callback, iterations - 1, callback, arg))
    
        @return_future
        def async_future(self, result, callback):
            self.io_loop.add_callback(callback, result)
    
        @gen.coroutine
        def async_exception(self, e):
            yield gen.moment
            raise e
    
        def test_no_yield(self):
            @gen.engine
            def f():
                self.stop()
            self.run_gen(f)
    
        def test_inline_cb(self):
            @gen.engine
            def f():
                (yield gen.Callback("k1"))()
                res = yield gen.Wait("k1")
                self.assertTrue(res is None)
                self.stop()
            self.run_gen(f)
    
        def test_ioloop_cb(self):
            @gen.engine
            def f():
                self.io_loop.add_callback((yield gen.Callback("k1")))
                yield gen.Wait("k1")
                self.stop()
            self.run_gen(f)
    
        def test_exception_phase1(self):
            @gen.engine
            def f():
                1 / 0
            self.assertRaises(ZeroDivisionError, self.run_gen, f)
    
        def test_exception_phase2(self):
            @gen.engine
            def f():
                self.io_loop.add_callback((yield gen.Callback("k1")))
                yield gen.Wait("k1")
                1 / 0
            self.assertRaises(ZeroDivisionError, self.run_gen, f)
    
        def test_exception_in_task_phase1(self):
            def fail_task(callback):
                1 / 0
    
            @gen.engine
            def f():
                try:
                    yield gen.Task(fail_task)
                    raise Exception("did not get expected exception")
                except ZeroDivisionError:
                    self.stop()
            self.run_gen(f)
    
        def test_exception_in_task_phase2(self):
            # This is the case that requires the use of stack_context in gen.engine
            def fail_task(callback):
                self.io_loop.add_callback(lambda: 1 / 0)
    
            @gen.engine
            def f():
                try:
                    yield gen.Task(fail_task)
                    raise Exception("did not get expected exception")
                except ZeroDivisionError:
                    self.stop()
            self.run_gen(f)
    
        def test_with_arg(self):
            @gen.engine
            def f():
                (yield gen.Callback("k1"))(42)
                res = yield gen.Wait("k1")
                self.assertEqual(42, res)
                self.stop()
            self.run_gen(f)
    
        def test_with_arg_tuple(self):
            @gen.engine
            def f():
                (yield gen.Callback((1, 2)))((3, 4))
                res = yield gen.Wait((1, 2))
                self.assertEqual((3, 4), res)
                self.stop()
            self.run_gen(f)
    
        def test_key_reuse(self):
            @gen.engine
            def f():
                yield gen.Callback("k1")
                yield gen.Callback("k1")
                self.stop()
            self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    
        def test_key_reuse_tuple(self):
            @gen.engine
            def f():
                yield gen.Callback((1, 2))
                yield gen.Callback((1, 2))
                self.stop()
            self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    
        def test_key_mismatch(self):
            @gen.engine
            def f():
                yield gen.Callback("k1")
                yield gen.Wait("k2")
                self.stop()
            self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    
        def test_key_mismatch_tuple(self):
            @gen.engine
            def f():
                yield gen.Callback((1, 2))
                yield gen.Wait((2, 3))
                self.stop()
            self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    
        def test_leaked_callback(self):
            @gen.engine
            def f():
                yield gen.Callback("k1")
                self.stop()
            self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    
        def test_leaked_callback_tuple(self):
            @gen.engine
            def f():
                yield gen.Callback((1, 2))
                self.stop()
            self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    
        def test_parallel_callback(self):
            @gen.engine
            def f():
                for k in range(3):
                    self.io_loop.add_callback((yield gen.Callback(k)))
                yield gen.Wait(1)
                self.io_loop.add_callback((yield gen.Callback(3)))
                yield gen.Wait(0)
                yield gen.Wait(3)
                yield gen.Wait(2)
                self.stop()
            self.run_gen(f)
    
        def test_bogus_yield(self):
            @gen.engine
            def f():
                yield 42
            self.assertRaises(gen.BadYieldError, self.run_gen, f)
    
        def test_bogus_yield_tuple(self):
            @gen.engine
            def f():
                yield (1, 2)
            self.assertRaises(gen.BadYieldError, self.run_gen, f)
    
        def test_reuse(self):
            @gen.engine
            def f():
                self.io_loop.add_callback((yield gen.Callback(0)))
                yield gen.Wait(0)
                self.stop()
            self.run_gen(f)
            self.run_gen(f)
    
        def test_task(self):
            @gen.engine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                self.stop()
            self.run_gen(f)
    
        def test_wait_all(self):
            @gen.engine
            def f():
                (yield gen.Callback("k1"))("v1")
                (yield gen.Callback("k2"))("v2")
                results = yield gen.WaitAll(["k1", "k2"])
                self.assertEqual(results, ["v1", "v2"])
                self.stop()
            self.run_gen(f)
    
        def test_exception_in_yield(self):
            @gen.engine
            def f():
                try:
                    yield gen.Wait("k1")
                    raise Exception("did not get expected exception")
                except gen.UnknownKeyError:
                    pass
                self.stop()
            self.run_gen(f)
    
        def test_resume_after_exception_in_yield(self):
            @gen.engine
            def f():
                try:
                    yield gen.Wait("k1")
                    raise Exception("did not get expected exception")
                except gen.UnknownKeyError:
                    pass
                (yield gen.Callback("k2"))("v2")
                self.assertEqual((yield gen.Wait("k2")), "v2")
                self.stop()
            self.run_gen(f)
    
        def test_orphaned_callback(self):
            @gen.engine
            def f():
                self.orphaned_callback = yield gen.Callback(1)
            try:
                self.run_gen(f)
                raise Exception("did not get expected exception")
            except gen.LeakedCallbackError:
                pass
            self.orphaned_callback()
    
        def test_none(self):
            @gen.engine
            def f():
                yield None
                self.stop()
            self.run_gen(f)
    
        def test_multi(self):
            @gen.engine
            def f():
                (yield gen.Callback("k1"))("v1")
                (yield gen.Callback("k2"))("v2")
                results = yield [gen.Wait("k1"), gen.Wait("k2")]
                self.assertEqual(results, ["v1", "v2"])
                self.stop()
            self.run_gen(f)
    
        def test_multi_dict(self):
            @gen.engine
            def f():
                (yield gen.Callback("k1"))("v1")
                (yield gen.Callback("k2"))("v2")
                results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2"))
                self.assertEqual(results, dict(foo="v1", bar="v2"))
                self.stop()
            self.run_gen(f)
    
        # The following tests explicitly run with both gen.Multi
        # and gen.multi_future (Task returns a Future, so it can be used
        # with either).
        def test_multi_yieldpoint_delayed(self):
            @gen.engine
            def f():
                # callbacks run at different times
                responses = yield gen.Multi([
                    gen.Task(self.delay_callback, 3, arg="v1"),
                    gen.Task(self.delay_callback, 1, arg="v2"),
                ])
                self.assertEqual(responses, ["v1", "v2"])
                self.stop()
            self.run_gen(f)
    
        def test_multi_yieldpoint_dict_delayed(self):
            @gen.engine
            def f():
                # callbacks run at different times
                responses = yield gen.Multi(dict(
                    foo=gen.Task(self.delay_callback, 3, arg="v1"),
                    bar=gen.Task(self.delay_callback, 1, arg="v2"),
                ))
                self.assertEqual(responses, dict(foo="v1", bar="v2"))
                self.stop()
            self.run_gen(f)
    
        def test_multi_future_delayed(self):
            @gen.engine
            def f():
                # callbacks run at different times
                responses = yield gen.multi_future([
                    gen.Task(self.delay_callback, 3, arg="v1"),
                    gen.Task(self.delay_callback, 1, arg="v2"),
                ])
                self.assertEqual(responses, ["v1", "v2"])
                self.stop()
            self.run_gen(f)
    
        def test_multi_future_dict_delayed(self):
            @gen.engine
            def f():
                # callbacks run at different times
                responses = yield gen.multi_future(dict(
                    foo=gen.Task(self.delay_callback, 3, arg="v1"),
                    bar=gen.Task(self.delay_callback, 1, arg="v2"),
                ))
                self.assertEqual(responses, dict(foo="v1", bar="v2"))
                self.stop()
            self.run_gen(f)
    
        @skipOnTravis
        @gen_test
        def test_multi_performance(self):
            # Yielding a list used to have quadratic performance; make
            # sure a large list stays reasonable.  On my laptop a list of
            # 2000 used to take 1.8s, now it takes 0.12.
            start = time.time()
            yield [gen.Task(self.io_loop.add_callback) for i in range(2000)]
            end = time.time()
            self.assertLess(end - start, 1.0)
    
        @gen_test
        def test_multi_empty(self):
            # Empty lists or dicts should return the same type.
            x = yield []
            self.assertTrue(isinstance(x, list))
            y = yield {}
            self.assertTrue(isinstance(y, dict))
    
        @gen_test
        def test_multi_mixed_types(self):
            # A YieldPoint (Wait) and Future (Task) can be combined
            # (and use the YieldPoint codepath)
            (yield gen.Callback("k1"))("v1")
            responses = yield [gen.Wait("k1"),
                               gen.Task(self.delay_callback, 3, arg="v2")]
            self.assertEqual(responses, ["v1", "v2"])
    
        @gen_test
        def test_future(self):
            result = yield self.async_future(1)
            self.assertEqual(result, 1)
    
        @gen_test
        def test_multi_future(self):
            results = yield [self.async_future(1), self.async_future(2)]
            self.assertEqual(results, [1, 2])
    
        @gen_test
        def test_multi_future_duplicate(self):
            f = self.async_future(2)
            results = yield [self.async_future(1), f, self.async_future(3), f]
            self.assertEqual(results, [1, 2, 3, 2])
    
        @gen_test
        def test_multi_dict_future(self):
            results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
            self.assertEqual(results, dict(foo=1, bar=2))
    
        @gen_test
        def test_multi_exceptions(self):
            with ExpectLog(app_log, "Multiple exceptions in yield list"):
                with self.assertRaises(RuntimeError) as cm:
                    yield gen.Multi([self.async_exception(RuntimeError("error 1")),
                                     self.async_exception(RuntimeError("error 2"))])
            self.assertEqual(str(cm.exception), "error 1")
    
            # With only one exception, no error is logged.
            with self.assertRaises(RuntimeError):
                yield gen.Multi([self.async_exception(RuntimeError("error 1")),
                                 self.async_future(2)])
    
            # Exception logging may be explicitly quieted.
            with self.assertRaises(RuntimeError):
                yield gen.Multi([self.async_exception(RuntimeError("error 1")),
                                 self.async_exception(RuntimeError("error 2"))],
                                quiet_exceptions=RuntimeError)
    
        @gen_test
        def test_multi_future_exceptions(self):
            with ExpectLog(app_log, "Multiple exceptions in yield list"):
                with self.assertRaises(RuntimeError) as cm:
                    yield [self.async_exception(RuntimeError("error 1")),
                           self.async_exception(RuntimeError("error 2"))]
            self.assertEqual(str(cm.exception), "error 1")
    
            # With only one exception, no error is logged.
            with self.assertRaises(RuntimeError):
                yield [self.async_exception(RuntimeError("error 1")),
                       self.async_future(2)]
    
            # Exception logging may be explicitly quieted.
            with self.assertRaises(RuntimeError):
                yield gen.multi_future(
                    [self.async_exception(RuntimeError("error 1")),
                     self.async_exception(RuntimeError("error 2"))],
                    quiet_exceptions=RuntimeError)
    
        def test_arguments(self):
            @gen.engine
            def f():
                (yield gen.Callback("noargs"))()
                self.assertEqual((yield gen.Wait("noargs")), None)
                (yield gen.Callback("1arg"))(42)
                self.assertEqual((yield gen.Wait("1arg")), 42)
    
                (yield gen.Callback("kwargs"))(value=42)
                result = yield gen.Wait("kwargs")
                self.assertTrue(isinstance(result, gen.Arguments))
                self.assertEqual(((), dict(value=42)), result)
                self.assertEqual(dict(value=42), result.kwargs)
    
                (yield gen.Callback("2args"))(42, 43)
                result = yield gen.Wait("2args")
                self.assertTrue(isinstance(result, gen.Arguments))
                self.assertEqual(((42, 43), {}), result)
                self.assertEqual((42, 43), result.args)
    
                def task_func(callback):
                    callback(None, error="foo")
                result = yield gen.Task(task_func)
                self.assertTrue(isinstance(result, gen.Arguments))
                self.assertEqual(((None,), dict(error="foo")), result)
    
                self.stop()
            self.run_gen(f)
    
        def test_stack_context_leak(self):
            # regression test: repeated invocations of a gen-based
            # function should not result in accumulated stack_contexts
            def _stack_depth():
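                # stack_context keeps the active contexts as a linked list;
                # follow the old_contexts pointers to measure its depth.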
                head = stack_context._state.contexts[1]
                length = 0
    
                while head is not None:
                    length += 1
                    head = head.old_contexts[1]
    
                return length
    
            @gen.engine
            def inner(callback):
                yield gen.Task(self.io_loop.add_callback)
                callback()
    
            @gen.engine
            def outer():
                for i in range(10):
                    yield gen.Task(inner)
    
                stack_increase = _stack_depth() - initial_stack_depth
                self.assertTrue(stack_increase <= 2)
                self.stop()
            initial_stack_depth = _stack_depth()
            self.run_gen(outer)
    
        def test_stack_context_leak_exception(self):
            # same as previous, but with a function that exits with an exception
            @gen.engine
            def inner(callback):
                yield gen.Task(self.io_loop.add_callback)
                1 / 0
    
            @gen.engine
            def outer():
                for i in range(10):
                    try:
                        yield gen.Task(inner)
                    except ZeroDivisionError:
                        pass
                stack_increase = len(stack_context._state.contexts) - initial_stack_depth
                self.assertTrue(stack_increase <= 2)
                self.stop()
            initial_stack_depth = len(stack_context._state.contexts)
            self.run_gen(outer)
    
        def function_with_stack_context(self, callback):
            # Technically this function should stack_context.wrap its callback
            # upon entry.  However, it is very common for this step to be
            # omitted.
            def step2():
                self.assertEqual(self.named_contexts, ['a'])
                self.io_loop.add_callback(callback)
    
            with stack_context.StackContext(self.named_context('a')):
                self.io_loop.add_callback(step2)
    
        @gen_test
        def test_wait_transfer_stack_context(self):
            # Wait should not pick up contexts from where callback was invoked,
            # even if that function improperly fails to wrap its callback.
            cb = yield gen.Callback('k1')
            self.function_with_stack_context(cb)
            self.assertEqual(self.named_contexts, [])
            yield gen.Wait('k1')
            self.assertEqual(self.named_contexts, [])
    
        @gen_test
        def test_task_transfer_stack_context(self):
            yield gen.Task(self.function_with_stack_context)
            self.assertEqual(self.named_contexts, [])
    
        def test_raise_after_stop(self):
            # This pattern will be used in the following tests so make sure
            # the exception propagates as expected.
            @gen.engine
            def f():
                self.stop()
                1 / 0
    
            with self.assertRaises(ZeroDivisionError):
                self.run_gen(f)
    
        def test_sync_raise_return(self):
            # gen.Return is allowed in @gen.engine, but it may not be used
            # to return a value.
            @gen.engine
            def f():
                self.stop(42)
                raise gen.Return()
    
            result = self.run_gen(f)
            self.assertEqual(result, 42)
    
        def test_async_raise_return(self):
            @gen.engine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                self.stop(42)
                raise gen.Return()
    
            result = self.run_gen(f)
            self.assertEqual(result, 42)
    
        def test_sync_raise_return_value(self):
            @gen.engine
            def f():
                raise gen.Return(42)
    
            with self.assertRaises(gen.ReturnValueIgnoredError):
                self.run_gen(f)
    
        def test_sync_raise_return_value_tuple(self):
            @gen.engine
            def f():
                raise gen.Return((1, 2))
    
            with self.assertRaises(gen.ReturnValueIgnoredError):
                self.run_gen(f)
    
        def test_async_raise_return_value(self):
            @gen.engine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                raise gen.Return(42)
    
            with self.assertRaises(gen.ReturnValueIgnoredError):
                self.run_gen(f)
    
        def test_async_raise_return_value_tuple(self):
            @gen.engine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                raise gen.Return((1, 2))
    
            with self.assertRaises(gen.ReturnValueIgnoredError):
                self.run_gen(f)
    
        def test_return_value(self):
            # It is an error to apply @gen.engine to a function that returns
            # a value.
            @gen.engine
            def f():
                return 42
    
            with self.assertRaises(gen.ReturnValueIgnoredError):
                self.run_gen(f)
    
        def test_return_value_tuple(self):
            # It is an error to apply @gen.engine to a function that returns
            # a value.
            @gen.engine
            def f():
                return (1, 2)
    
            with self.assertRaises(gen.ReturnValueIgnoredError):
                self.run_gen(f)
    
        @skipNotCPython
        def test_task_refcounting(self):
            # On CPython, tasks and their arguments should be released immediately
            # without waiting for garbage collection.
            @gen.engine
            def f():
                class Foo(object):
                    pass
                arg = Foo()
                self.arg_ref = weakref.ref(arg)
                task = gen.Task(self.io_loop.add_callback, arg=arg)
                self.task_ref = weakref.ref(task)
                yield task
                self.stop()
    
            self.run_gen(f)
            self.assertIs(self.arg_ref(), None)
            self.assertIs(self.task_ref(), None)
    
    
    class GenCoroutineTest(AsyncTestCase):
        def setUp(self):
            # Stray StopIteration exceptions can lead to tests exiting prematurely,
            # so we need explicit checks here to make sure the tests run all
            # the way through.
            self.finished = False
            super(GenCoroutineTest, self).setUp()
    
        def tearDown(self):
            super(GenCoroutineTest, self).tearDown()
            assert self.finished
    
        def test_attributes(self):
            self.finished = True
    
            def f():
                yield gen.moment
    
            coro = gen.coroutine(f)
            self.assertEqual(coro.__name__, f.__name__)
            self.assertEqual(coro.__module__, f.__module__)
            self.assertIs(coro.__wrapped__, f)
    
        def test_is_coroutine_function(self):
            self.finished = True
    
            def f():
                yield gen.moment
    
            coro = gen.coroutine(f)
            self.assertFalse(gen.is_coroutine_function(f))
            self.assertTrue(gen.is_coroutine_function(coro))
            self.assertFalse(gen.is_coroutine_function(coro()))
    
        @gen_test
        def test_sync_gen_return(self):
            @gen.coroutine
            def f():
                raise gen.Return(42)
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_async_gen_return(self):
            @gen.coroutine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                raise gen.Return(42)
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_sync_return(self):
            @gen.coroutine
            def f():
                return 42
            result = yield f()
            self.assertEqual(result, 42)
            self.finished = True
    
        @skipBefore33
        @gen_test
        def test_async_return(self):
            namespace = exec_test(globals(), locals(), """
            @gen.coroutine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                return 42
            """)
            result = yield namespace['f']()
            self.assertEqual(result, 42)
            self.finished = True
    
        @skipBefore33
        @gen_test
        def test_async_early_return(self):
            # A yield statement exists but is not executed, which means
            # this function "returns" via an exception.  This exception
            # doesn't happen before the exception handling is set up.
            namespace = exec_test(globals(), locals(), """
            @gen.coroutine
            def f():
                if True:
                    return 42
                yield gen.Task(self.io_loop.add_callback)
            """)
            result = yield namespace['f']()
            self.assertEqual(result, 42)
            self.finished = True
    
        @skipBefore35
        @gen_test
        def test_async_await(self):
            # This test verifies that an async function can await a
            # yield-based gen.coroutine, and that a gen.coroutine
            # (the test method itself) can yield an async function.
            namespace = exec_test(globals(), locals(), """
            async def f():
                await gen.Task(self.io_loop.add_callback)
                return 42
            """)
            result = yield namespace['f']()
            self.assertEqual(result, 42)
            self.finished = True
    
        @skipBefore35
        @gen_test
        def test_asyncio_sleep_zero(self):
            # asyncio.sleep(0) turns into a special case (equivalent to
            # `yield None`)
            namespace = exec_test(globals(), locals(), """
            async def f():
                import asyncio
                await asyncio.sleep(0)
                return 42
            """)
            result = yield namespace['f']()
            self.assertEqual(result, 42)
            self.finished = True
    
        @skipBefore35
        @gen_test
        def test_async_await_mixed_multi_native_future(self):
            namespace = exec_test(globals(), locals(), """
            async def f1():
                await gen.Task(self.io_loop.add_callback)
                return 42
            """)
    
            @gen.coroutine
            def f2():
                yield gen.Task(self.io_loop.add_callback)
                raise gen.Return(43)
    
            results = yield [namespace['f1'](), f2()]
            self.assertEqual(results, [42, 43])
            self.finished = True
    
        @skipBefore35
        @gen_test
        def test_async_await_mixed_multi_native_yieldpoint(self):
            namespace = exec_test(globals(), locals(), """
            async def f1():
                await gen.Task(self.io_loop.add_callback)
                return 42
            """)
    
            @gen.coroutine
            def f2():
                yield gen.Task(self.io_loop.add_callback)
                raise gen.Return(43)
    
            f2(callback=(yield gen.Callback('cb')))
            results = yield [namespace['f1'](), gen.Wait('cb')]
            self.assertEqual(results, [42, 43])
            self.finished = True
    
        @skipBefore35
        @gen_test
        def test_async_with_timeout(self):
            namespace = exec_test(globals(), locals(), """
            async def f1():
                return 42
            """)
    
            result = yield gen.with_timeout(datetime.timedelta(hours=1),
                                            namespace['f1']())
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_sync_return_no_value(self):
            @gen.coroutine
            def f():
                return
            result = yield f()
            self.assertEqual(result, None)
            self.finished = True
    
        @gen_test
        def test_async_return_no_value(self):
            # Without a return value we don't need python 3.3.
            @gen.coroutine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                return
            result = yield f()
            self.assertEqual(result, None)
            self.finished = True
    
        @gen_test
        def test_sync_raise(self):
            @gen.coroutine
            def f():
                1 / 0
            # The exception is raised when the future is yielded
            # (or equivalently when its result method is called),
            # not when the function itself is called.
            future = f()
            with self.assertRaises(ZeroDivisionError):
                yield future
            self.finished = True
    
        @gen_test
        def test_async_raise(self):
            @gen.coroutine
            def f():
                yield gen.Task(self.io_loop.add_callback)
                1 / 0
            future = f()
            with self.assertRaises(ZeroDivisionError):
                yield future
            self.finished = True
    
        @gen_test
        def test_pass_callback(self):
            @gen.coroutine
            def f():
                raise gen.Return(42)
            result = yield gen.Task(f)
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_replace_yieldpoint_exception(self):
            # Test exception handling: a coroutine can catch one exception
            # raised by a yield point and raise a different one.
            @gen.coroutine
            def f1():
                1 / 0
    
            @gen.coroutine
            def f2():
                try:
                    yield f1()
                except ZeroDivisionError:
                    raise KeyError()
    
            future = f2()
            with self.assertRaises(KeyError):
                yield future
            self.finished = True
    
        @gen_test
        def test_swallow_yieldpoint_exception(self):
            # Test exception handling: a coroutine can catch an exception
            # raised by a yield point and not raise a different one.
            @gen.coroutine
            def f1():
                1 / 0
    
            @gen.coroutine
            def f2():
                try:
                    yield f1()
                except ZeroDivisionError:
                    raise gen.Return(42)
    
            result = yield f2()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_replace_context_exception(self):
            # Test exception handling: exceptions thrown into the stack context
            # can be caught and replaced.
            # Note that this test and the following are for behavior that is
            # not really supported any more:  coroutines no longer create a
            # stack context automatically; but one is created after the first
            # YieldPoint (i.e. not a Future).
            @gen.coroutine
            def f2():
                (yield gen.Callback(1))()
                yield gen.Wait(1)
                self.io_loop.add_callback(lambda: 1 / 0)
                try:
                    yield gen.Task(self.io_loop.add_timeout,
                                   self.io_loop.time() + 10)
                except ZeroDivisionError:
                    raise KeyError()
    
            future = f2()
            with self.assertRaises(KeyError):
                yield future
            self.finished = True
    
        @gen_test
        def test_swallow_context_exception(self):
            # Test exception handling: exceptions thrown into the stack context
            # can be caught and ignored.
            @gen.coroutine
            def f2():
                (yield gen.Callback(1))()
                yield gen.Wait(1)
                self.io_loop.add_callback(lambda: 1 / 0)
                try:
                    yield gen.Task(self.io_loop.add_timeout,
                                   self.io_loop.time() + 10)
                except ZeroDivisionError:
                    raise gen.Return(42)
    
            result = yield f2()
            self.assertEqual(result, 42)
            self.finished = True
    
        @gen_test
        def test_moment(self):
            calls = []
    
            @gen.coroutine
            def f(name, yieldable):
                for i in range(5):
                    calls.append(name)
                    yield yieldable
            # First, confirm the behavior without moment: each coroutine
            # monopolizes the event loop until it finishes.
            immediate = Future()
            immediate.set_result(None)
            yield [f('a', immediate), f('b', immediate)]
            self.assertEqual(''.join(calls), 'aaaaabbbbb')
    
            # With moment, they take turns.
            calls = []
            yield [f('a', gen.moment), f('b', gen.moment)]
            self.assertEqual(''.join(calls), 'ababababab')
            self.finished = True
    
            calls = []
            yield [f('a', gen.moment), f('b', immediate)]
            self.assertEqual(''.join(calls), 'abbbbbaaaa')
    
        @gen_test
        def test_sleep(self):
            yield gen.sleep(0.01)
            self.finished = True
    
        @skipBefore33
        @gen_test
        def test_py3_leak_exception_context(self):
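            # Implicit exception chaining (PEP 3134) must not link successive
            # coroutine failures: each exception should surface with a clean
            # __context__.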
            class LeakedException(Exception):
                pass
    
            @gen.coroutine
            def inner(iteration):
                raise LeakedException(iteration)
    
            try:
                yield inner(1)
            except LeakedException as e:
                self.assertEqual(str(e), "1")
                self.assertIsNone(e.__context__)
    
            try:
                yield inner(2)
            except LeakedException as e:
                self.assertEqual(str(e), "2")
                self.assertIsNone(e.__context__)
    
            self.finished = True
    
        @skipNotCPython
        def test_coroutine_refcounting(self):
            # On CPython, tasks and their arguments should be released immediately
            # without waiting for garbage collection.
            @gen.coroutine
            def inner():
                class Foo(object):
                    pass
                local_var = Foo()
                self.local_ref = weakref.ref(local_var)
                yield gen.coroutine(lambda: None)()
                raise ValueError('Some error')
    
            @gen.coroutine
            def inner2():
                try:
                    yield inner()
                except ValueError:
                    pass
    
            self.io_loop.run_sync(inner2, timeout=3)
    
            self.assertIs(self.local_ref(), None)
            self.finished = True
    
    
    class GenSequenceHandler(RequestHandler):
        @asynchronous
        @gen.engine
        def get(self):
            self.io_loop = self.request.connection.stream.io_loop
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            self.write("1")
            self.io_loop.add_callback((yield gen.Callback("k2")))
            yield gen.Wait("k2")
            self.write("2")
            # reuse an old key
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            self.finish("3")
    
    
    class GenCoroutineSequenceHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            self.io_loop = self.request.connection.stream.io_loop
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            self.write("1")
            self.io_loop.add_callback((yield gen.Callback("k2")))
            yield gen.Wait("k2")
            self.write("2")
            # reuse an old key
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            self.finish("3")
    
    
    class GenCoroutineUnfinishedSequenceHandler(RequestHandler):
        @asynchronous
        @gen.coroutine
        def get(self):
            self.io_loop = self.request.connection.stream.io_loop
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            self.write("1")
            self.io_loop.add_callback((yield gen.Callback("k2")))
            yield gen.Wait("k2")
            self.write("2")
            # reuse an old key
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            # just write, don't finish
            self.write("3")
    
    
    class GenTaskHandler(RequestHandler):
        @asynchronous
        @gen.engine
        def get(self):
            io_loop = self.request.connection.stream.io_loop
            client = AsyncHTTPClient(io_loop=io_loop)
            response = yield gen.Task(client.fetch, self.get_argument('url'))
            response.rethrow()
            self.finish(b"got response: " + response.body)
    
    
    class GenExceptionHandler(RequestHandler):
        @asynchronous
        @gen.engine
        def get(self):
            # This test depends on the order of the two decorators.
            io_loop = self.request.connection.stream.io_loop
            yield gen.Task(io_loop.add_callback)
            raise Exception("oops")
    
    
    class GenCoroutineExceptionHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            # This test depends on the order of the two decorators.
            io_loop = self.request.connection.stream.io_loop
            yield gen.Task(io_loop.add_callback)
            raise Exception("oops")
    
    
    class GenYieldExceptionHandler(RequestHandler):
        @asynchronous
        @gen.engine
        def get(self):
            io_loop = self.request.connection.stream.io_loop
            # Test the interaction of the two stack_contexts.
    
            def fail_task(callback):
                io_loop.add_callback(lambda: 1 / 0)
            try:
                yield gen.Task(fail_task)
                raise Exception("did not get expected exception")
            except ZeroDivisionError:
                self.finish('ok')
    
    
    # "Undecorated" here refers to the absence of @asynchronous.
    class UndecoratedCoroutinesHandler(RequestHandler):
        @gen.coroutine
        def prepare(self):
            self.chunks = []
            yield gen.Task(IOLoop.current().add_callback)
            self.chunks.append('1')
    
        @gen.coroutine
        def get(self):
            self.chunks.append('2')
            yield gen.Task(IOLoop.current().add_callback)
            self.chunks.append('3')
            yield gen.Task(IOLoop.current().add_callback)
            self.write(''.join(self.chunks))
    
    
    class AsyncPrepareErrorHandler(RequestHandler):
        @gen.coroutine
        def prepare(self):
            yield gen.Task(IOLoop.current().add_callback)
            raise HTTPError(403)
    
        def get(self):
            self.finish('ok')
    
    
    class NativeCoroutineHandler(RequestHandler):
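        # 'async def' is a syntax error on Pythons before 3.5, so the
        # native-coroutine handler must be compiled conditionally via exec().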
        if sys.version_info > (3, 5):
            exec(textwrap.dedent("""
            async def get(self):
                await gen.Task(IOLoop.current().add_callback)
                self.write("ok")
            """))
    
    
    class GenWebTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application([
                ('/sequence', GenSequenceHandler),
                ('/coroutine_sequence', GenCoroutineSequenceHandler),
                ('/coroutine_unfinished_sequence',
                 GenCoroutineUnfinishedSequenceHandler),
                ('/task', GenTaskHandler),
                ('/exception', GenExceptionHandler),
                ('/coroutine_exception', GenCoroutineExceptionHandler),
                ('/yield_exception', GenYieldExceptionHandler),
                ('/undecorated_coroutine', UndecoratedCoroutinesHandler),
                ('/async_prepare_error', AsyncPrepareErrorHandler),
                ('/native_coroutine', NativeCoroutineHandler),
            ])
    
        def test_sequence_handler(self):
            response = self.fetch('/sequence')
            self.assertEqual(response.body, b"123")
    
        def test_coroutine_sequence_handler(self):
            response = self.fetch('/coroutine_sequence')
            self.assertEqual(response.body, b"123")
    
        def test_coroutine_unfinished_sequence_handler(self):
            response = self.fetch('/coroutine_unfinished_sequence')
            self.assertEqual(response.body, b"123")
    
        def test_task_handler(self):
            response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence')))
            self.assertEqual(response.body, b"got response: 123")
    
        def test_exception_handler(self):
            # Make sure we get an error and not a timeout
            with ExpectLog(app_log, "Uncaught exception GET /exception"):
                response = self.fetch('/exception')
            self.assertEqual(500, response.code)
    
        def test_coroutine_exception_handler(self):
            # Make sure we get an error and not a timeout
            with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"):
                response = self.fetch('/coroutine_exception')
            self.assertEqual(500, response.code)
    
        def test_yield_exception_handler(self):
            response = self.fetch('/yield_exception')
            self.assertEqual(response.body, b'ok')
    
        def test_undecorated_coroutines(self):
            response = self.fetch('/undecorated_coroutine')
            self.assertEqual(response.body, b'123')
    
        def test_async_prepare_error_handler(self):
            response = self.fetch('/async_prepare_error')
            self.assertEqual(response.code, 403)
    
        @skipBefore35
        def test_native_coroutine_handler(self):
            response = self.fetch('/native_coroutine')
            self.assertEqual(response.code, 200)
            self.assertEqual(response.body, b'ok')
    
    
    class WithTimeoutTest(AsyncTestCase):
        @gen_test
        def test_timeout(self):
            with self.assertRaises(gen.TimeoutError):
                yield gen.with_timeout(datetime.timedelta(seconds=0.1),
                                       Future())
    
        @gen_test
        def test_completes_before_timeout(self):
            future = Future()
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
                                     lambda: future.set_result('asdf'))
            result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                            future, io_loop=self.io_loop)
            self.assertEqual(result, 'asdf')
    
        @gen_test
        def test_fails_before_timeout(self):
            future = Future()
            self.io_loop.add_timeout(
                datetime.timedelta(seconds=0.1),
                lambda: future.set_exception(ZeroDivisionError()))
            with self.assertRaises(ZeroDivisionError):
                yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                       future, io_loop=self.io_loop)
    
        @gen_test
        def test_already_resolved(self):
            future = Future()
            future.set_result('asdf')
            result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                            future, io_loop=self.io_loop)
            self.assertEqual(result, 'asdf')
    
        @unittest.skipIf(futures is None, 'futures module not present')
        @gen_test
        def test_timeout_concurrent_future(self):
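            # Passing an absolute deadline equal to "now" forces an immediate
            # timeout while the worker thread is still sleeping.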
            with futures.ThreadPoolExecutor(1) as executor:
                with self.assertRaises(gen.TimeoutError):
                    yield gen.with_timeout(self.io_loop.time(),
                                           executor.submit(time.sleep, 0.1))
    
        @unittest.skipIf(futures is None, 'futures module not present')
        @gen_test
        def test_completed_concurrent_future(self):
            with futures.ThreadPoolExecutor(1) as executor:
                yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                       executor.submit(lambda: None))
    
    
    class WaitIteratorTest(AsyncTestCase):
        @gen_test
        def test_empty_iterator(self):
            g = gen.WaitIterator()
            self.assertTrue(g.done(), 'empty generator iterated')
    
            with self.assertRaises(ValueError):
                g = gen.WaitIterator(False, bar=False)
    
            self.assertEqual(g.current_index, None, "bad nil current index")
            self.assertEqual(g.current_future, None, "bad nil current future")
    
        @gen_test
        def test_already_done(self):
            f1 = Future()
            f2 = Future()
            f3 = Future()
            f1.set_result(24)
            f2.set_result(42)
            f3.set_result(84)
    
            g = gen.WaitIterator(f1, f2, f3)
            i = 0
            while not g.done():
                r = yield g.next()
                # Order is not guaranteed, but the current implementation
                # preserves ordering of already-done Futures.
                if i == 0:
                    self.assertEqual(g.current_index, 0)
                    self.assertIs(g.current_future, f1)
                    self.assertEqual(r, 24)
                elif i == 1:
                    self.assertEqual(g.current_index, 1)
                    self.assertIs(g.current_future, f2)
                    self.assertEqual(r, 42)
                elif i == 2:
                    self.assertEqual(g.current_index, 2)
                    self.assertIs(g.current_future, f3)
                    self.assertEqual(r, 84)
                i += 1
    
            self.assertEqual(g.current_index, None, "bad nil current index")
            self.assertEqual(g.current_future, None, "bad nil current future")
    
            dg = gen.WaitIterator(f1=f1, f2=f2)
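            # With keyword arguments, current_index is the keyword name
            # ("f1"/"f2") rather than a positional index.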
    
            while not dg.done():
                dr = yield dg.next()
                if dg.current_index == "f1":
                    self.assertTrue(dg.current_future == f1 and dr == 24,
                                    "WaitIterator dict status incorrect")
                elif dg.current_index == "f2":
                    self.assertTrue(dg.current_future == f2 and dr == 42,
                                    "WaitIterator dict status incorrect")
                else:
                    self.fail("got bad WaitIterator index {}".format(
                        dg.current_index))
    
                i += 1
    
            self.assertEqual(dg.current_index, None, "bad nil current index")
            self.assertEqual(dg.current_future, None, "bad nil current future")
    
        def finish_coroutines(self, iteration, futures):
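            # Resolve the futures on a staggered schedule across IOLoop
            # iterations: futures[2] first, then futures[0] with an error,
            # and finally futures[1] and futures[3] together.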
            if iteration == 3:
                futures[2].set_result(24)
            elif iteration == 5:
                futures[0].set_exception(ZeroDivisionError())
            elif iteration == 8:
                futures[1].set_result(42)
                futures[3].set_result(84)
    
            if iteration < 8:
                self.io_loop.add_callback(self.finish_coroutines, iteration + 1, futures)
    
        @gen_test
        def test_iterator(self):
            futures = [Future(), Future(), Future(), Future()]
    
            self.finish_coroutines(0, futures)
    
            g = gen.WaitIterator(*futures)
    
            i = 0
            while not g.done():
                try:
                    r = yield g.next()
                except ZeroDivisionError:
                    self.assertIs(g.current_future, futures[0],
                                  'exception future invalid')
                else:
                    if i == 0:
                        self.assertEqual(r, 24, 'iterator value incorrect')
                        self.assertEqual(g.current_index, 2, 'wrong index')
                    elif i == 2:
                        self.assertEqual(r, 42, 'iterator value incorrect')
                        self.assertEqual(g.current_index, 1, 'wrong index')
                    elif i == 3:
                        self.assertEqual(r, 84, 'iterator value incorrect')
                        self.assertEqual(g.current_index, 3, 'wrong index')
                i += 1
    
        @skipBefore35
        @gen_test
        def test_iterator_async_await(self):
            # Recreate the previous test with py35 syntax. It's a little clunky
            # because of the way the previous test handles an exception on
            # a single iteration.
            futures = [Future(), Future(), Future(), Future()]
            self.finish_coroutines(0, futures)
            self.finished = False
    
            namespace = exec_test(globals(), locals(), """
            async def f():
                i = 0
                g = gen.WaitIterator(*futures)
                try:
                    async for r in g:
                        if i == 0:
                            self.assertEqual(r, 24, 'iterator value incorrect')
                            self.assertEqual(g.current_index, 2, 'wrong index')
                        else:
                            raise Exception("expected exception on iteration 1")
                        i += 1
                except ZeroDivisionError:
                    i += 1
                async for r in g:
                    if i == 2:
                        self.assertEqual(r, 42, 'iterator value incorrect')
                        self.assertEqual(g.current_index, 1, 'wrong index')
                    elif i == 3:
                        self.assertEqual(r, 84, 'iterator value incorrect')
                        self.assertEqual(g.current_index, 3, 'wrong index')
                    else:
                        raise Exception("didn't expect iteration %d" % i)
                    i += 1
                self.finished = True
            """)
            yield namespace['f']()
            self.assertTrue(self.finished)
    
        @gen_test
        def test_no_ref(self):
            # In this usage, there is no direct hard reference to the
            # WaitIterator itself, only the Future it returns. Since
            # WaitIterator uses weak references internally to improve GC
            # performance, this used to cause problems.
            yield gen.with_timeout(datetime.timedelta(seconds=0.1),
                                   gen.WaitIterator(gen.sleep(0)).next())
    
    
    class RunnerGCTest(AsyncTestCase):
        """Github issue 1769: Runner objects can get GCed unexpectedly"""
        @gen_test
        def test_gc(self):
            """Runners shouldn't GC if future is alive"""
            # Create the weakref
            weakref_scope = [None]
    
            def callback():
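                # A full collection must not reap the coroutine's Runner;
                # if it did, the pending Future would be gone and the
                # weakref below would return None.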
                gc.collect(2)
                weakref_scope[0]().set_result(123)
    
            @gen.coroutine
            def tester():
                fut = Future()
                weakref_scope[0] = weakref.ref(fut)
                self.io_loop.add_callback(callback)
                yield fut
    
            yield gen.with_timeout(
                datetime.timedelta(seconds=0.2),
                tester()
            )
    
    
    if __name__ == '__main__':
        unittest.main()
    tornado-4.5.3/tornado/test/gettext_translations/000077500000000000000000000000001322420601000220715ustar00rootroot00000000000000tornado-4.5.3/tornado/test/gettext_translations/extract_me.py000066400000000000000000000013211322420601000245730ustar00rootroot00000000000000# flake8: noqa
    # Dummy source file to allow creation of the initial .po file in the
    # same way as a real project.  I'm not entirely sure about the real
    # workflow here, but this seems to work.
    #
    # 1) xgettext --language=Python --keyword=_:1,2 --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3 extract_me.py -o tornado_test.po
    # 2) Edit tornado_test.po, setting CHARSET, Plural-Forms and setting msgstr
    # 3) msgfmt tornado_test.po -o tornado_test.mo
    # 4) Put the file in the proper location: $LANG/LC_MESSAGES
    
    from __future__ import absolute_import, division, print_function
    _("school")
    pgettext("law", "right")
    pgettext("good", "right")
    pgettext("organization", "club", "clubs", 1)
    pgettext("stick", "club", "clubs", 1)
tornado-4.5.3/tornado/test/gettext_translations/fr_FR/000077500000000000000000000000001322420601000230675ustar00rootroot00000000000000tornado-4.5.3/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/000077500000000000000000000000001322420601000246545ustar00rootroot00000000000000tornado-4.5.3/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo000066400000000000000000000012311322420601000277130ustar00rootroot00000000000000[binary gettext catalog omitted: tornado_test.mo is the msgfmt-compiled form of tornado_test.po below]
tornado-4.5.3/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po000066400000000000000000000020311322420601000277170ustar00rootroot00000000000000# SOME DESCRIPTIVE TITLE.
    # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
    # This file is distributed under the same license as the PACKAGE package.
    # FIRST AUTHOR , YEAR.
    #
    #, fuzzy
    msgid ""
    msgstr ""
    "Project-Id-Version: PACKAGE VERSION\n"
    "Report-Msgid-Bugs-To: \n"
    "POT-Creation-Date: 2015-01-27 11:05+0300\n"
    "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
    "Last-Translator: FULL NAME \n"
    "Language-Team: LANGUAGE \n"
    "Language: \n"
    "MIME-Version: 1.0\n"
    "Content-Type: text/plain; charset=utf-8\n"
    "Content-Transfer-Encoding: 8bit\n"
    "Plural-Forms: nplurals=2; plural=(n > 1);\n"
    
    #: extract_me.py:11
    msgid "school"
    msgstr "école"
    
    #: extract_me.py:12
    msgctxt "law"
    msgid "right"
    msgstr "le droit"
    
    #: extract_me.py:13
    msgctxt "good"
    msgid "right"
    msgstr "le bien"
    
    #: extract_me.py:14
    msgctxt "organization"
    msgid "club"
    msgid_plural "clubs"
    msgstr[0] "le club"
    msgstr[1] "les clubs"
    
    #: extract_me.py:15
    msgctxt "stick"
    msgid "club"
    msgid_plural "clubs"
    msgstr[0] "le bâton"
    msgstr[1] "les bâtons"
    tornado-4.5.3/tornado/test/http1connection_test.py000066400000000000000000000037131322420601000223410ustar00rootroot00000000000000from __future__ import absolute_import, division, print_function
    
    import socket
    
    from tornado.http1connection import HTTP1Connection
    from tornado.httputil import HTTPMessageDelegate
    from tornado.iostream import IOStream
    from tornado.locks import Event
    from tornado.netutil import add_accept_handler
    from tornado.testing import AsyncTestCase, bind_unused_port, gen_test
    
    
    class HTTP1ConnectionTest(AsyncTestCase):
        def setUp(self):
            super(HTTP1ConnectionTest, self).setUp()
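            # asyncSetUp is decorated with @gen_test, so calling it here
            # runs the coroutine to completion on self.io_loop before each
            # test method starts.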
            self.asyncSetUp()
    
        @gen_test
        def asyncSetUp(self):
            listener, port = bind_unused_port()
            event = Event()
    
            def accept_callback(conn, addr):
                self.server_stream = IOStream(conn)
                self.addCleanup(self.server_stream.close)
                event.set()
    
            add_accept_handler(listener, accept_callback)
            self.client_stream = IOStream(socket.socket())
            self.addCleanup(self.client_stream.close)
            yield [self.client_stream.connect(('127.0.0.1', port)),
                   event.wait()]
            self.io_loop.remove_handler(listener)
            listener.close()
    
        @gen_test
        def test_http10_no_content_length(self):
            # Regression test for a bug in which can_keep_alive would crash
            # for an HTTP/1.0 (not 1.1) response with no content-length.
            conn = HTTP1Connection(self.client_stream, True)
            self.server_stream.write(b"HTTP/1.0 200 Not Modified\r\n\r\nhello")
            self.server_stream.close()
    
            event = Event()
            test = self
            body = []
    
            class Delegate(HTTPMessageDelegate):
                def headers_received(self, start_line, headers):
                    test.code = start_line.code
    
                def data_received(self, data):
                    body.append(data)
    
                def finish(self):
                    event.set()
    
            yield conn.read_response(Delegate())
            yield event.wait()
            self.assertEqual(self.code, 200)
            self.assertEqual(b''.join(body), b'hello')
    tornado-4.5.3/tornado/test/httpclient_test.py000066400000000000000000000652351322420601000214060ustar00rootroot00000000000000#!/usr/bin/env python
    
    from __future__ import absolute_import, division, print_function
    
    import base64
    import binascii
    from contextlib import closing
    import copy
    import functools
    import sys
    import threading
    import datetime
    from io import BytesIO
    
    from tornado.escape import utf8, native_str
    from tornado import gen
    from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop
    from tornado.iostream import IOStream
    from tornado.log import gen_log
    from tornado import netutil
    from tornado.stack_context import ExceptionStackContext, NullContext
    from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
    from tornado.test.util import unittest, skipOnTravis
    from tornado.web import Application, RequestHandler, url
    from tornado.httputil import format_timestamp, HTTPHeaders
    
    
    class HelloWorldHandler(RequestHandler):
        def get(self):
            name = self.get_argument("name", "world")
            self.set_header("Content-Type", "text/plain")
            self.finish("Hello %s!" % name)
    
    
    class PostHandler(RequestHandler):
        def post(self):
            self.finish("Post arg1: %s, arg2: %s" % (
                self.get_argument("arg1"), self.get_argument("arg2")))
    
    
    class PutHandler(RequestHandler):
        def put(self):
            self.write("Put body: ")
            self.write(self.request.body)
    
    
    class RedirectHandler(RequestHandler):
        def prepare(self):
            self.write('redirects can have bodies too')
            self.redirect(self.get_argument("url"),
                          status=int(self.get_argument("status", "302")))
    
    
    class ChunkHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            self.write("asdf")
            self.flush()
            # Wait a bit to ensure the chunks are sent and received separately.
            yield gen.sleep(0.01)
            self.write("qwer")
    
    
    class AuthHandler(RequestHandler):
        def get(self):
            self.finish(self.request.headers["Authorization"])
    
    
    class CountdownHandler(RequestHandler):
        def get(self, count):
            count = int(count)
            if count > 0:
                self.redirect(self.reverse_url("countdown", count - 1))
            else:
                self.write("Zero")
    
    
    class EchoPostHandler(RequestHandler):
        def post(self):
            self.write(self.request.body)
    
    
    class UserAgentHandler(RequestHandler):
        def get(self):
            self.write(self.request.headers.get('User-Agent', 'User agent not set'))
    
    
    class ContentLength304Handler(RequestHandler):
        def get(self):
            self.set_status(304)
            self.set_header('Content-Length', 42)
    
        def _clear_headers_for_304(self):
            # Tornado strips content-length from 304 responses, but here we
            # want to simulate servers that include the headers anyway.
            pass
    
    
    class PatchHandler(RequestHandler):
    
        def patch(self):
            "Return the request payload - so we can check it is being kept"
            self.write(self.request.body)
    
    
    class AllMethodsHandler(RequestHandler):
        SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
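        # Every supported verb (plus the nonstandard OTHER) is aliased to
        # method() below, which simply echoes the request method.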
    
        def method(self):
            self.write(self.request.method)
    
        get = post = put = delete = options = patch = other = method
    
    
    class SetHeaderHandler(RequestHandler):
        def get(self):
            # Use get_arguments for keys to get strings, but
            # request.arguments for values to get bytes.
            for k, v in zip(self.get_arguments('k'),
                            self.request.arguments['v']):
                self.set_header(k, v)
    
    # These tests end up getting run redundantly: once here with the default
    # HTTPClient implementation, and then again in each implementation's own
    # test suite.
    
    
    class HTTPClientCommonTestCase(AsyncHTTPTestCase):
        def get_app(self):
            return Application([
                url("/hello", HelloWorldHandler),
                url("/post", PostHandler),
                url("/put", PutHandler),
                url("/redirect", RedirectHandler),
                url("/chunk", ChunkHandler),
                url("/auth", AuthHandler),
                url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
                url("/echopost", EchoPostHandler),
                url("/user_agent", UserAgentHandler),
                url("/304_with_content_length", ContentLength304Handler),
                url("/all_methods", AllMethodsHandler),
                url('/patch', PatchHandler),
                url('/set_header', SetHeaderHandler),
            ], gzip=True)
    
        def test_patch_receives_payload(self):
            body = b"some patch data"
            response = self.fetch("/patch", method='PATCH', body=body)
            self.assertEqual(response.code, 200)
            self.assertEqual(response.body, body)
    
        @skipOnTravis
        def test_hello_world(self):
            response = self.fetch("/hello")
            self.assertEqual(response.code, 200)
            self.assertEqual(response.headers["Content-Type"], "text/plain")
            self.assertEqual(response.body, b"Hello world!")
            self.assertEqual(int(response.request_time), 0)
    
            response = self.fetch("/hello?name=Ben")
            self.assertEqual(response.body, b"Hello Ben!")
    
        def test_streaming_callback(self):
            # streaming_callback is also tested in test_chunked
            chunks = []
            response = self.fetch("/hello",
                                  streaming_callback=chunks.append)
            # with streaming_callback, data goes to the callback and not response.body
            self.assertEqual(chunks, [b"Hello world!"])
            self.assertFalse(response.body)
    
        def test_post(self):
            response = self.fetch("/post", method="POST",
                                  body="arg1=foo&arg2=bar")
            self.assertEqual(response.code, 200)
            self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    
        def test_chunked(self):
            response = self.fetch("/chunk")
            self.assertEqual(response.body, b"asdfqwer")
    
            chunks = []
            response = self.fetch("/chunk",
                                  streaming_callback=chunks.append)
            self.assertEqual(chunks, [b"asdf", b"qwer"])
            self.assertFalse(response.body)
    
        def test_chunked_close(self):
            # test case in which chunks spread read-callback processing
            # over several ioloop iterations, but the connection is already closed.
            sock, port = bind_unused_port()
            with closing(sock):
                def write_response(stream, request_data):
                    if b"HTTP/1." not in request_data:
                        self.skipTest("requires HTTP/1.x")
                    stream.write(b"""\
    HTTP/1.1 200 OK
    Transfer-Encoding: chunked
    
    1
    1
    1
    2
    0
    
    """.replace(b"\n", b"\r\n"), callback=stream.close)
    
                def accept_callback(conn, address):
                    # fake an HTTP server using chunked encoding where the final chunks
                    # and connection close all happen at once
                    stream = IOStream(conn, io_loop=self.io_loop)
                    stream.read_until(b"\r\n\r\n",
                                      functools.partial(write_response, stream))
                netutil.add_accept_handler(sock, accept_callback, self.io_loop)
                self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
                resp = self.wait()
                resp.rethrow()
                self.assertEqual(resp.body, b"12")
                self.io_loop.remove_handler(sock.fileno())
    
        def test_streaming_stack_context(self):
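            # The ZeroDivisionError raised from the streaming callback below
            # should be captured by the surrounding ExceptionStackContext
            # rather than propagating out of fetch().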
            chunks = []
            exc_info = []
    
            def error_handler(typ, value, tb):
                exc_info.append((typ, value, tb))
                return True
    
            def streaming_cb(chunk):
                chunks.append(chunk)
                if chunk == b'qwer':
                    1 / 0
    
            with ExceptionStackContext(error_handler):
                self.fetch('/chunk', streaming_callback=streaming_cb)
    
            self.assertEqual(chunks, [b'asdf', b'qwer'])
            self.assertEqual(1, len(exc_info))
            self.assertIs(exc_info[0][0], ZeroDivisionError)
    
        def test_basic_auth(self):
            self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                        auth_password="open sesame").body,
                             b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    
        def test_basic_auth_explicit_mode(self):
            self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
                                        auth_password="open sesame",
                                        auth_mode="basic").body,
                             b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    
        def test_unsupported_auth_mode(self):
            # curl and simple clients handle errors a bit differently; the
            # important thing is that they don't fall back to basic auth
            # on an unknown mode.
            with ExpectLog(gen_log, "uncaught exception", required=False):
                with self.assertRaises((ValueError, HTTPError)):
                    response = self.fetch("/auth", auth_username="Aladdin",
                                          auth_password="open sesame",
                                          auth_mode="asdf")
                    response.rethrow()
    
        def test_follow_redirect(self):
            response = self.fetch("/countdown/2", follow_redirects=False)
            self.assertEqual(302, response.code)
            self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
    
            response = self.fetch("/countdown/2")
            self.assertEqual(200, response.code)
            self.assertTrue(response.effective_url.endswith("/countdown/0"))
            self.assertEqual(b"Zero", response.body)
    
        def test_credentials_in_url(self):
            url = self.get_url("/auth").replace("http://", "http://me:secret@")
            self.http_client.fetch(url, self.stop)
            response = self.wait()
            self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
                             response.body)
    
        def test_body_encoding(self):
            unicode_body = u"\xe9"
            byte_body = binascii.a2b_hex(b"e9")
    
            # unicode string in body gets converted to utf8
            response = self.fetch("/echopost", method="POST", body=unicode_body,
                                  headers={"Content-Type": "application/blah"})
            self.assertEqual(response.headers["Content-Length"], "2")
            self.assertEqual(response.body, utf8(unicode_body))
    
            # byte strings pass through directly
            response = self.fetch("/echopost", method="POST",
                                  body=byte_body,
                                  headers={"Content-Type": "application/blah"})
            self.assertEqual(response.headers["Content-Length"], "1")
            self.assertEqual(response.body, byte_body)
    
            # Mixing unicode in headers and byte string bodies shouldn't
            # break anything
            response = self.fetch("/echopost", method="POST", body=byte_body,
                                  headers={"Content-Type": "application/blah"},
                                  user_agent=u"foo")
            self.assertEqual(response.headers["Content-Length"], "1")
            self.assertEqual(response.body, byte_body)
    
        def test_types(self):
            response = self.fetch("/hello")
            self.assertEqual(type(response.body), bytes)
            self.assertEqual(type(response.headers["Content-Type"]), str)
            self.assertEqual(type(response.code), int)
            self.assertEqual(type(response.effective_url), str)
    
        def test_header_callback(self):
            first_line = []
            headers = {}
            chunks = []
    
            def header_callback(header_line):
                if header_line.startswith('HTTP/1.1 101'):
                    # Upgrading to HTTP/2
                    pass
                elif header_line.startswith('HTTP/'):
                    first_line.append(header_line)
                elif header_line != '\r\n':
                    k, v = header_line.split(':', 1)
                    headers[k.lower()] = v.strip()
    
            def streaming_callback(chunk):
                # All header callbacks are run before any streaming callbacks,
                # so the header data is available to process the data as it
                # comes in.
                self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
                chunks.append(chunk)
    
            self.fetch('/chunk', header_callback=header_callback,
                       streaming_callback=streaming_callback)
            self.assertEqual(len(first_line), 1, first_line)
            self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
            self.assertEqual(chunks, [b'asdf', b'qwer'])
    
        def test_header_callback_stack_context(self):
            exc_info = []
    
            def error_handler(typ, value, tb):
                exc_info.append((typ, value, tb))
                return True
    
            def header_callback(header_line):
                if header_line.lower().startswith('content-type:'):
                    1 / 0
    
            with ExceptionStackContext(error_handler):
                self.fetch('/chunk', header_callback=header_callback)
            self.assertEqual(len(exc_info), 1)
            self.assertIs(exc_info[0][0], ZeroDivisionError)
    
        def test_configure_defaults(self):
            defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
            # Construct a new instance of the configured client class
            client = self.http_client.__class__(self.io_loop, force_instance=True,
                                                defaults=defaults)
            try:
                client.fetch(self.get_url('/user_agent'), callback=self.stop)
                response = self.wait()
                self.assertEqual(response.body, b'TestDefaultUserAgent')
            finally:
                client.close()
    
        def test_header_types(self):
            # Header values may be passed as character or utf8 byte strings,
            # in a plain dictionary or an HTTPHeaders object.
            # Keys must always be the native str type.
            # All combinations should have the same results on the wire.
            for value in [u"MyUserAgent", b"MyUserAgent"]:
                for container in [dict, HTTPHeaders]:
                    headers = container()
                    headers['User-Agent'] = value
                    resp = self.fetch('/user_agent', headers=headers)
                    self.assertEqual(
                        resp.body, b"MyUserAgent",
                        "response=%r, value=%r, container=%r" %
                        (resp.body, value, container))
    
        def test_multi_line_headers(self):
            # Multi-line http headers are rare but rfc-allowed
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
            sock, port = bind_unused_port()
            with closing(sock):
                def write_response(stream, request_data):
                    if b"HTTP/1." not in request_data:
                        self.skipTest("requires HTTP/1.x")
                    stream.write(b"""\
    HTTP/1.1 200 OK
    X-XSS-Protection: 1;
    \tmode=block
    
    """.replace(b"\n", b"\r\n"), callback=stream.close)
    
                def accept_callback(conn, address):
                    stream = IOStream(conn, io_loop=self.io_loop)
                    stream.read_until(b"\r\n\r\n",
                                      functools.partial(write_response, stream))
                netutil.add_accept_handler(sock, accept_callback, self.io_loop)
                self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
                resp = self.wait()
                resp.rethrow()
                self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
                self.io_loop.remove_handler(sock.fileno())
    
        def test_304_with_content_length(self):
            # According to the spec 304 responses SHOULD NOT include
            # Content-Length or other entity headers, but some servers do it
            # anyway.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
            response = self.fetch('/304_with_content_length')
            self.assertEqual(response.code, 304)
            self.assertEqual(response.headers['Content-Length'], '42')
    
        def test_final_callback_stack_context(self):
            # The final callback should be run outside of the httpclient's
        # stack_context.  We want to ensure that there is no stack_context
            # between the user's callback and the IOLoop, so monkey-patch
            # IOLoop.handle_callback_exception and disable the test harness's
            # context with a NullContext.
            # Note that this does not apply to secondary callbacks (header
            # and streaming_callback), as errors there must be seen as errors
            # by the http client so it can clean up the connection.
            exc_info = []
    
            def handle_callback_exception(callback):
                exc_info.append(sys.exc_info())
                self.stop()
            self.io_loop.handle_callback_exception = handle_callback_exception
            with NullContext():
                self.http_client.fetch(self.get_url('/hello'),
                                       lambda response: 1 / 0)
            self.wait()
            self.assertEqual(exc_info[0][0], ZeroDivisionError)
    
        @gen_test
        def test_future_interface(self):
            response = yield self.http_client.fetch(self.get_url('/hello'))
            self.assertEqual(response.body, b'Hello world!')
    
        @gen_test
        def test_future_http_error(self):
            with self.assertRaises(HTTPError) as context:
                yield self.http_client.fetch(self.get_url('/notfound'))
            self.assertEqual(context.exception.code, 404)
            self.assertEqual(context.exception.response.code, 404)
    
        @gen_test
        def test_future_http_error_no_raise(self):
            response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
            self.assertEqual(response.code, 404)
    
        @gen_test
        def test_reuse_request_from_response(self):
            # The response.request attribute should be an HTTPRequest, not
            # a _RequestProxy.
            # This test uses self.http_client.fetch because self.fetch calls
            # self.get_url on the input unconditionally.
            url = self.get_url('/hello')
            response = yield self.http_client.fetch(url)
            self.assertEqual(response.request.url, url)
            self.assertTrue(isinstance(response.request, HTTPRequest))
            response2 = yield self.http_client.fetch(response.request)
            self.assertEqual(response2.body, b'Hello world!')
    
        def test_all_methods(self):
            for method in ['GET', 'DELETE', 'OPTIONS']:
                response = self.fetch('/all_methods', method=method)
                self.assertEqual(response.body, utf8(method))
            for method in ['POST', 'PUT', 'PATCH']:
                response = self.fetch('/all_methods', method=method, body=b'')
                self.assertEqual(response.body, utf8(method))
            response = self.fetch('/all_methods', method='HEAD')
            self.assertEqual(response.body, b'')
            response = self.fetch('/all_methods', method='OTHER',
                                  allow_nonstandard_methods=True)
            self.assertEqual(response.body, b'OTHER')
    
        def test_body_sanity_checks(self):
            # These methods require a body.
            for method in ('POST', 'PUT', 'PATCH'):
                with self.assertRaises(ValueError) as context:
                    resp = self.fetch('/all_methods', method=method)
                    resp.rethrow()
                self.assertIn('must not be None', str(context.exception))
    
                resp = self.fetch('/all_methods', method=method,
                                  allow_nonstandard_methods=True)
                self.assertEqual(resp.code, 200)
    
            # These methods don't allow a body.
            for method in ('GET', 'DELETE', 'OPTIONS'):
                with self.assertRaises(ValueError) as context:
                    resp = self.fetch('/all_methods', method=method, body=b'asdf')
                    resp.rethrow()
                self.assertIn('must be None', str(context.exception))
    
                # In most cases this can be overridden, but curl_httpclient
                # does not allow body with a GET at all.
                if method != 'GET':
                    resp = self.fetch('/all_methods', method=method, body=b'asdf',
                                      allow_nonstandard_methods=True)
                    resp.rethrow()
                    self.assertEqual(resp.code, 200)
    
        # This test causes odd failures with the combination of
        # curl_httpclient (at least with the version of libcurl available
        # on ubuntu 12.04), TwistedIOLoop, and epoll.  For POST (but not PUT),
        # curl decides the response came back too soon and closes the connection
        # to start again.  It does this *before* telling the socket callback to
        # unregister the FD.  Some IOLoop implementations have special kernel
        # integration to discover this immediately.  Tornado's IOLoops
        # ignore errors on remove_handler to accommodate this behavior, but
        # Twisted's reactor does not.  The removeReader call fails and so
        # do all future removeAll calls (which our tests do at cleanup).
        #
        # def test_post_307(self):
        #    response = self.fetch("/redirect?status=307&url=/post",
        #                          method="POST", body=b"arg1=foo&arg2=bar")
        #    self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    
        def test_put_307(self):
            response = self.fetch("/redirect?status=307&url=/put",
                                  method="PUT", body=b"hello")
            response.rethrow()
            self.assertEqual(response.body, b"Put body: hello")
    
        def test_non_ascii_header(self):
            # Non-ascii headers are sent as latin1.
            response = self.fetch("/set_header?k=foo&v=%E9")
            response.rethrow()
            self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
    
    
    class RequestProxyTest(unittest.TestCase):
        def test_request_set(self):
            proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                              user_agent='foo'),
                                  dict())
            self.assertEqual(proxy.user_agent, 'foo')
    
        def test_default_set(self):
            proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                                  dict(network_interface='foo'))
            self.assertEqual(proxy.network_interface, 'foo')
    
        def test_both_set(self):
            proxy = _RequestProxy(HTTPRequest('http://example.com/',
                                              proxy_host='foo'),
                                  dict(proxy_host='bar'))
            self.assertEqual(proxy.proxy_host, 'foo')
    
        def test_neither_set(self):
            proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                                  dict())
            self.assertIs(proxy.auth_username, None)
    
        def test_bad_attribute(self):
            proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                                  dict())
            with self.assertRaises(AttributeError):
                proxy.foo
    
        def test_defaults_none(self):
            proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
            self.assertIs(proxy.auth_username, None)
    
    
    class HTTPResponseTestCase(unittest.TestCase):
        def test_str(self):
            response = HTTPResponse(HTTPRequest('http://example.com'),
                                    200, headers={}, buffer=BytesIO())
            s = str(response)
            self.assertTrue(s.startswith('HTTPResponse('))
            self.assertIn('code=200', s)
    
    
    class SyncHTTPClientTest(unittest.TestCase):
        def setUp(self):
            if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
                                                      'AsyncIOMainLoop'):
                # TwistedIOLoop only supports the global reactor, so we can't have
                # separate IOLoops for client and server threads.
                # AsyncIOMainLoop doesn't work with the default policy
                # (although it could with some tweaks to this test and a
                # policy that created loops for non-main threads).
                raise unittest.SkipTest(
                    'Sync HTTPClient not compatible with TwistedIOLoop or '
                    'AsyncIOMainLoop')
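            # The server gets a dedicated IOLoop running in a background
            # thread; the synchronous HTTPClient below creates its own
            # private IOLoop in the main thread.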
            self.server_ioloop = IOLoop()
    
            sock, self.port = bind_unused_port()
            app = Application([('/', HelloWorldHandler)])
            self.server = HTTPServer(app, io_loop=self.server_ioloop)
            self.server.add_socket(sock)
    
            self.server_thread = threading.Thread(target=self.server_ioloop.start)
            self.server_thread.start()
    
            self.http_client = HTTPClient()
    
        def tearDown(self):
            def stop_server():
                self.server.stop()
                # Delay the shutdown of the IOLoop by one iteration because
                # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
                # with http/2, which leaves a Future with an unexamined
                # StreamClosedError on the loop).
                self.server_ioloop.add_callback(self.server_ioloop.stop)
            self.server_ioloop.add_callback(stop_server)
            self.server_thread.join()
            self.http_client.close()
            self.server_ioloop.close(all_fds=True)
    
        def get_url(self, path):
            return 'http://127.0.0.1:%d%s' % (self.port, path)
    
        def test_sync_client(self):
            response = self.http_client.fetch(self.get_url('/'))
            self.assertEqual(b'Hello world!', response.body)
    
        def test_sync_client_error(self):
            # Synchronous HTTPClient raises errors directly; no need for
            # response.rethrow()
            with self.assertRaises(HTTPError) as assertion:
                self.http_client.fetch(self.get_url('/notfound'))
            self.assertEqual(assertion.exception.code, 404)
    
    
    class HTTPRequestTestCase(unittest.TestCase):
        def test_headers(self):
            request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
            self.assertEqual(request.headers, {'foo': 'bar'})
    
        def test_headers_setter(self):
            request = HTTPRequest('http://example.com')
            request.headers = {'bar': 'baz'}
            self.assertEqual(request.headers, {'bar': 'baz'})
    
        def test_null_headers_setter(self):
            request = HTTPRequest('http://example.com')
            request.headers = None
            self.assertEqual(request.headers, {})
    
        def test_body(self):
            request = HTTPRequest('http://example.com', body='foo')
            self.assertEqual(request.body, utf8('foo'))
    
        def test_body_setter(self):
            request = HTTPRequest('http://example.com')
            request.body = 'foo'
            self.assertEqual(request.body, utf8('foo'))
    
        def test_if_modified_since(self):
            http_date = datetime.datetime.utcnow()
            request = HTTPRequest('http://example.com', if_modified_since=http_date)
            self.assertEqual(request.headers,
                             {'If-Modified-Since': format_timestamp(http_date)})
    
    
    class HTTPErrorTestCase(unittest.TestCase):
        def test_copy(self):
            e = HTTPError(403)
            e2 = copy.copy(e)
            self.assertIsNot(e, e2)
            self.assertEqual(e.code, e2.code)
    
        def test_plain_error(self):
            e = HTTPError(403)
            self.assertEqual(str(e), "HTTP 403: Forbidden")
            self.assertEqual(repr(e), "HTTP 403: Forbidden")
    
        def test_error_with_response(self):
            resp = HTTPResponse(HTTPRequest('http://example.com/'), 403)
            with self.assertRaises(HTTPError) as cm:
                resp.rethrow()
            e = cm.exception
            self.assertEqual(str(e), "HTTP 403: Forbidden")
            self.assertEqual(repr(e), "HTTP 403: Forbidden")
    tornado-4.5.3/tornado/test/httpserver_test.py000066400000000000000000001224531322420601000214320ustar00rootroot00000000000000#!/usr/bin/env python
    
    
    from __future__ import absolute_import, division, print_function
    from tornado import netutil
    from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
    from tornado import gen
    from tornado.http1connection import HTTP1Connection
    from tornado.httpserver import HTTPServer
    from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
    from tornado.iostream import IOStream
    from tornado.log import gen_log
    from tornado.netutil import ssl_options_to_context
    from tornado.simple_httpclient import SimpleAsyncHTTPClient
    from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
    from tornado.test.util import unittest, skipOnTravis
    from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
    from contextlib import closing
    import datetime
    import gzip
    import os
    import shutil
    import socket
    import ssl
    import sys
    import tempfile
    from io import BytesIO
    
    
    def read_stream_body(stream, callback):
        """Reads an HTTP response from `stream` and runs callback with its
        headers and body."""
        chunks = []
    
        class Delegate(HTTPMessageDelegate):
            def headers_received(self, start_line, headers):
                self.headers = headers
    
            def data_received(self, chunk):
                chunks.append(chunk)
    
            def finish(self):
                callback((self.headers, b''.join(chunks)))
        conn = HTTP1Connection(stream, True)
        conn.read_response(Delegate())
    
    
    class HandlerBaseTestCase(AsyncHTTPTestCase):
        def get_app(self):
            return Application([('/', self.__class__.Handler)])
    
        def fetch_json(self, *args, **kwargs):
            response = self.fetch(*args, **kwargs)
            response.rethrow()
            return json_decode(response.body)
    
    
    class HelloWorldRequestHandler(RequestHandler):
        def initialize(self, protocol="http"):
            self.expected_protocol = protocol
    
        def get(self):
            if self.request.protocol != self.expected_protocol:
                raise Exception("unexpected protocol")
            self.finish("Hello world")
    
        def post(self):
            self.finish("Got %d bytes in POST" % len(self.request.body))
    
    
    # In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
    # ClientHello messages, which are rejected by SSLv3 and TLSv1
    # servers.  Note that while the OPENSSL_VERSION_INFO was formally
    # introduced in python3.2, it was present but undocumented in
    # python 2.7
    skipIfOldSSL = unittest.skipIf(
        getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
        "old version of ssl module and/or openssl")
    
    
    class BaseSSLTest(AsyncHTTPSTestCase):
        def get_app(self):
            return Application([('/', HelloWorldRequestHandler,
                                 dict(protocol="https"))])
    
    
    class SSLTestMixin(object):
        def get_ssl_options(self):
            return dict(ssl_version=self.get_ssl_version(),  # type: ignore
                        **AsyncHTTPSTestCase.get_ssl_options())
    
        def get_ssl_version(self):
            raise NotImplementedError()
    
        def test_ssl(self):
            response = self.fetch('/')
            self.assertEqual(response.body, b"Hello world")
    
        def test_large_post(self):
            response = self.fetch('/',
                                  method='POST',
                                  body='A' * 5000)
            self.assertEqual(response.body, b"Got 5000 bytes in POST")
    
        def test_non_ssl_request(self):
            # Make sure the server closes the connection when it gets a non-ssl
            # connection, rather than waiting for a timeout or otherwise
            # misbehaving.
            with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
                with ExpectLog(gen_log, 'Uncaught exception', required=False):
                    self.http_client.fetch(
                        self.get_url("/").replace('https:', 'http:'),
                        self.stop,
                        request_timeout=3600,
                        connect_timeout=3600)
                    response = self.wait()
            self.assertEqual(response.code, 599)
    
        def test_error_logging(self):
            # No stack traces are logged for SSL errors.
            with ExpectLog(gen_log, 'SSL Error') as expect_log:
                self.http_client.fetch(
                    self.get_url("/").replace("https:", "http:"),
                    self.stop)
                response = self.wait()
                self.assertEqual(response.code, 599)
            self.assertFalse(expect_log.logged_stack)
    
    # Python's SSL implementation differs significantly between versions.
    # For example, SSLv3 and TLSv1 throw an exception if you try to read
    # from the socket before the handshake is complete, but the default
    # of SSLv23 allows it.
    
    
    class SSLv23Test(BaseSSLTest, SSLTestMixin):
        def get_ssl_version(self):
            return ssl.PROTOCOL_SSLv23
    
    
    @skipIfOldSSL
    class SSLv3Test(BaseSSLTest, SSLTestMixin):
        def get_ssl_version(self):
            return ssl.PROTOCOL_SSLv3
    
    
    @skipIfOldSSL
    class TLSv1Test(BaseSSLTest, SSLTestMixin):
        def get_ssl_version(self):
            return ssl.PROTOCOL_TLSv1
    
    
    @unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
    class SSLContextTest(BaseSSLTest, SSLTestMixin):
        def get_ssl_options(self):
            context = ssl_options_to_context(
                AsyncHTTPSTestCase.get_ssl_options(self))
            assert isinstance(context, ssl.SSLContext)
            return context
    
    
    class BadSSLOptionsTest(unittest.TestCase):
        def test_missing_arguments(self):
            application = Application()
            self.assertRaises(KeyError, HTTPServer, application, ssl_options={
                "keyfile": "/__missing__.crt",
            })
    
        def test_missing_key(self):
            """A missing SSL key should cause an immediate exception."""
    
            application = Application()
            module_dir = os.path.dirname(__file__)
            existing_certificate = os.path.join(module_dir, 'test.crt')
            existing_key = os.path.join(module_dir, 'test.key')
    
            self.assertRaises((ValueError, IOError),
                              HTTPServer, application, ssl_options={
                                  "certfile": "/__mising__.crt",
            })
            self.assertRaises((ValueError, IOError),
                              HTTPServer, application, ssl_options={
                                  "certfile": existing_certificate,
                                  "keyfile": "/__missing__.key"
            })
    
            # This actually works because both files exist
            HTTPServer(application, ssl_options={
                       "certfile": existing_certificate,
                       "keyfile": existing_key,
                       })
    
    
    class MultipartTestHandler(RequestHandler):
        def post(self):
            self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
                         "argument": self.get_argument("argument"),
                         "filename": self.request.files["files"][0].filename,
                         "filebody": _unicode(self.request.files["files"][0]["body"]),
                         })
    
    
    # This test is also called from wsgi_test
    class HTTPConnectionTest(AsyncHTTPTestCase):
        def get_handlers(self):
            return [("/multipart", MultipartTestHandler),
                    ("/hello", HelloWorldRequestHandler)]
    
        def get_app(self):
            return Application(self.get_handlers())
    
        def raw_fetch(self, headers, body, newline=b"\r\n"):
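            # Bypass the HTTP client entirely: write a raw request over a
            # plain IOStream and return just the response body.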
            with closing(IOStream(socket.socket())) as stream:
                stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
                self.wait()
                stream.write(
                    newline.join(headers +
                                 [utf8("Content-Length: %d" % len(body))]) +
                    newline + newline + body)
                read_stream_body(stream, self.stop)
                headers, body = self.wait()
                return body
    
        def test_multipart_form(self):
            # Encodings here are tricky:  Headers are latin1, bodies can be
            # anything (we use utf8 by default).
            response = self.raw_fetch([
                b"POST /multipart HTTP/1.0",
                b"Content-Type: multipart/form-data; boundary=1234567890",
                b"X-Header-encoding-test: \xe9",
            ],
                b"\r\n".join([
                    b"Content-Disposition: form-data; name=argument",
                    b"",
                    u"\u00e1".encode("utf-8"),
                    b"--1234567890",
                    u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"),
                    b"",
                    u"\u00fa".encode("utf-8"),
                    b"--1234567890--",
                    b"",
                ]))
            data = json_decode(response)
            self.assertEqual(u"\u00e9", data["header"])
            self.assertEqual(u"\u00e1", data["argument"])
            self.assertEqual(u"\u00f3", data["filename"])
            self.assertEqual(u"\u00fa", data["filebody"])
    
        def test_newlines(self):
            # We support both CRLF and bare LF as line separators.
            for newline in (b"\r\n", b"\n"):
                response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"",
                                          newline=newline)
                self.assertEqual(response, b'Hello world')
    
        def test_100_continue(self):
            # Run through a 100-continue interaction by hand:
            # When given Expect: 100-continue, we get a 100 response after the
            # headers, and then the real response after the body.
            stream = IOStream(socket.socket(), io_loop=self.io_loop)
            stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop)
            self.wait()
            stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
                                       b"Content-Length: 1024",
                                       b"Expect: 100-continue",
                                       b"Connection: close",
                                       b"\r\n"]), callback=self.stop)
            self.wait()
            stream.read_until(b"\r\n\r\n", self.stop)
            data = self.wait()
            self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
            stream.write(b"a" * 1024)
            stream.read_until(b"\r\n", self.stop)
            first_line = self.wait()
            self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
            stream.read_until(b"\r\n\r\n", self.stop)
            header_data = self.wait()
            headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
            stream.read_bytes(int(headers["Content-Length"]), self.stop)
            body = self.wait()
            self.assertEqual(body, b"Got 1024 bytes in POST")
            stream.close()
    
    
    class EchoHandler(RequestHandler):
        def get(self):
            self.write(recursive_unicode(self.request.arguments))
    
        def post(self):
            self.write(recursive_unicode(self.request.arguments))
    
    
    class TypeCheckHandler(RequestHandler):
        def prepare(self):
            self.errors = {}
            fields = [
                ('method', str),
                ('uri', str),
                ('version', str),
                ('remote_ip', str),
                ('protocol', str),
                ('host', str),
                ('path', str),
                ('query', str),
            ]
            for field, expected_type in fields:
                self.check_type(field, getattr(self.request, field), expected_type)
    
            self.check_type('header_key', list(self.request.headers.keys())[0], str)
            self.check_type('header_value', list(self.request.headers.values())[0], str)
    
            self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
            self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
            # secure cookies
    
            self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
            self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)
    
        def post(self):
            self.check_type('body', self.request.body, bytes)
            self.write(self.errors)
    
        def get(self):
            self.write(self.errors)
    
        def check_type(self, name, obj, expected_type):
            actual_type = type(obj)
            if expected_type != actual_type:
                self.errors[name] = "expected %s, got %s" % (expected_type,
                                                             actual_type)
    
    
    class HTTPServerTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application([("/echo", EchoHandler),
                                ("/typecheck", TypeCheckHandler),
                                ("//doubleslash", EchoHandler),
                                ])
    
        def test_query_string_encoding(self):
            response = self.fetch("/echo?foo=%C3%A9")
            data = json_decode(response.body)
            self.assertEqual(data, {u"foo": [u"\u00e9"]})
    
        def test_empty_query_string(self):
            response = self.fetch("/echo?foo=&foo=")
            data = json_decode(response.body)
            self.assertEqual(data, {u"foo": [u"", u""]})
    
        def test_empty_post_parameters(self):
            response = self.fetch("/echo", method="POST", body="foo=&bar=")
            data = json_decode(response.body)
            self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})
    
        def test_types(self):
            headers = {"Cookie": "foo=bar"}
            response = self.fetch("/typecheck?foo=bar", headers=headers)
            data = json_decode(response.body)
            self.assertEqual(data, {})
    
            response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
            data = json_decode(response.body)
            self.assertEqual(data, {})
    
        def test_double_slash(self):
            # urlparse.urlsplit (which tornado.httpserver used to use
            # incorrectly) would parse paths beginning with "//" as
            # protocol-relative urls.
            response = self.fetch("//doubleslash")
            self.assertEqual(200, response.code)
            self.assertEqual(json_decode(response.body), {})
    
        def test_malformed_body(self):
            # parse_qs is pretty forgiving, but it will fail on python 3
            # if the data is not utf8.  On python 2 parse_qs will work,
            # but then the recursive_unicode call in EchoHandler will
            # fail.
            if str is bytes:
                return
            with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
                response = self.fetch(
                    '/echo', method="POST",
                    headers={'Content-Type': 'application/x-www-form-urlencoded'},
                    body=b'\xe9')
            self.assertEqual(200, response.code)
            self.assertEqual(b'{}', response.body)
    
    
    class HTTPServerRawTest(AsyncHTTPTestCase):
        def get_app(self):
            return Application([
                ('/echo', EchoHandler),
            ])
    
        def setUp(self):
            super(HTTPServerRawTest, self).setUp()
            self.stream = IOStream(socket.socket())
            self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
            self.wait()
    
        def tearDown(self):
            self.stream.close()
            super(HTTPServerRawTest, self).tearDown()
    
        def test_empty_request(self):
            self.stream.close()
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
            self.wait()
    
        def test_malformed_first_line(self):
            with ExpectLog(gen_log, '.*Malformed HTTP request line'):
                self.stream.write(b'asdf\r\n\r\n')
                # TODO: need an async version of ExpectLog so we don't need
                # hard-coded timeouts here.
                self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
                                         self.stop)
                self.wait()
    
        def test_malformed_headers(self):
            with ExpectLog(gen_log, '.*Malformed HTTP headers'):
                self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
                self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
                                         self.stop)
                self.wait()
    
        def test_chunked_request_body(self):
            # Chunked requests are not widely supported and we don't have a way
            # to generate them in AsyncHTTPClient, but HTTPServer will read them.
            self.stream.write(b"""\
    POST /echo HTTP/1.1
    Transfer-Encoding: chunked
    Content-Type: application/x-www-form-urlencoded
    
    4
    foo=
    3
    bar
    0
    
    """.replace(b"\n", b"\r\n"))
            read_stream_body(self.stream, self.stop)
            headers, response = self.wait()
            self.assertEqual(json_decode(response), {u'foo': [u'bar']})
    
        def test_chunked_request_uppercase(self):
            # Per RFC 2616 section 3.6, the value of the "Transfer-Encoding"
            # header is case-insensitive.
            self.stream.write(b"""\
    POST /echo HTTP/1.1
    Transfer-Encoding: Chunked
    Content-Type: application/x-www-form-urlencoded
    
    4
    foo=
    3
    bar
    0
    
    """.replace(b"\n", b"\r\n"))
            read_stream_body(self.stream, self.stop)
            headers, response = self.wait()
            self.assertEqual(json_decode(response), {u'foo': [u'bar']})
    
        def test_invalid_content_length(self):
            with ExpectLog(gen_log, '.*Only integer Content-Length is allowed'):
                self.stream.write(b"""\
    POST /echo HTTP/1.1
    Content-Length: foo
    
    bar
    
    """.replace(b"\n", b"\r\n"))
                self.stream.read_until_close(self.stop)
                self.wait()
    
    
    class XHeaderTest(HandlerBaseTestCase):
        class Handler(RequestHandler):
            def get(self):
                self.write(dict(remote_ip=self.request.remote_ip,
                                remote_protocol=self.request.protocol))
    
        def get_httpserver_options(self):
            return dict(xheaders=True, trusted_downstream=['5.5.5.5'])
    
        def test_ip_headers(self):
            self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
    
            valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
                "4.4.4.4")
    
            valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
                "4.4.4.4")
    
            valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
                "2620:0:1cfe:face:b00c::3")
    
            valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
            self.assertEqual(
                self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
                "2620:0:1cfe:face:b00c::3")
    
            invalid_chars = {"X-Real-IP": "4.4.4.4
    
    '
                           for p in paths)
    
        def render_embed_js(self, js_embed):
            """Default method used to render the final embedded js for the
            rendered webpage.
    
            Override this method in a sub-classed controller to change the output.
            """
            return b'<script type="text/javascript">\n//<![CDATA[\n' + \
                b'\n'.join(js_embed) + \
                b'\n//]]>\n</script>'
    
        def render_linked_css(self, css_files):
            """Default method used to render the final css links for the
            rendered webpage.
    
            Override this method in a sub-classed controller to change the output.
            """
            paths = []
            unique_paths = set()
    
            for path in css_files:
                if not is_absolute(path):
                    path = self.static_url(path)
                if path not in unique_paths:
                    paths.append(path)
                    unique_paths.add(path)
    
            return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
                           'type="text/css" rel="stylesheet"/>'
                           for p in paths)
    
        def render_embed_css(self, css_embed):
            """Default method used to render the final embedded css for the
            rendered webpage.
    
            Override this method in a sub-classed controller to change the output.
            """
            return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
                b'\n</style>'
    
        def render_string(self, template_name, **kwargs):
            """Generate the given template with the given arguments.
    
            We return the generated byte string (in utf8). To generate and
            write a template as a response, use render() above.
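
            For example, inside a handler method::

                html = self.render_string("sidebar.html",
                                          user=self.current_user)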
            """
            # If no template_path is specified, use the path of the calling file
            template_path = self.get_template_path()
            if not template_path:
                frame = sys._getframe(0)
                web_file = frame.f_code.co_filename
                while frame.f_code.co_filename == web_file:
                    frame = frame.f_back
                template_path = os.path.dirname(frame.f_code.co_filename)
            with RequestHandler._template_loader_lock:
                if template_path not in RequestHandler._template_loaders:
                    loader = self.create_template_loader(template_path)
                    RequestHandler._template_loaders[template_path] = loader
                else:
                    loader = RequestHandler._template_loaders[template_path]
            t = loader.load(template_name)
            namespace = self.get_template_namespace()
            namespace.update(kwargs)
            return t.generate(**namespace)
    
        def get_template_namespace(self):
            """Returns a dictionary to be used as the default template namespace.
    
            May be overridden by subclasses to add or modify values.
    
            The results of this method will be combined with additional
            defaults in the `tornado.template` module and keyword arguments
            to `render` or `render_string`.
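
            A sketch of a typical override (``BaseHandler`` and
            ``site_name`` are illustrative)::

                def get_template_namespace(self):
                    namespace = super(BaseHandler, self).get_template_namespace()
                    namespace["site_name"] = "My Site"
                    return namespace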
            """
            namespace = dict(
                handler=self,
                request=self.request,
                current_user=self.current_user,
                locale=self.locale,
                _=self.locale.translate,
                pgettext=self.locale.pgettext,
                static_url=self.static_url,
                xsrf_form_html=self.xsrf_form_html,
                reverse_url=self.reverse_url
            )
            namespace.update(self.ui)
            return namespace
    
        def create_template_loader(self, template_path):
            """Returns a new template loader for the given path.
    
            May be overridden by subclasses.  By default returns a
            directory-based loader on the given path, using the
            ``autoescape`` and ``template_whitespace`` application
            settings.  If a ``template_loader`` application setting is
            supplied, uses that instead.
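
            For example, to force one loader for all handlers
            (``MyLoader`` is illustrative)::

                app = Application(handlers,
                                  template_loader=MyLoader("/srv/templates"))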
            """
            settings = self.application.settings
            if "template_loader" in settings:
                return settings["template_loader"]
            kwargs = {}
            if "autoescape" in settings:
                # autoescape=None means "no escaping", so we have to be sure
                # to only pass this kwarg if the user asked for it.
                kwargs["autoescape"] = settings["autoescape"]
            if "template_whitespace" in settings:
                kwargs["whitespace"] = settings["template_whitespace"]
            return template.Loader(template_path, **kwargs)
    
        def flush(self, include_footers=False, callback=None):
            """Flushes the current output buffer to the network.
    
            The ``callback`` argument, if given, can be used for flow control:
            it will be run when all flushed data has been written to the socket.
            Note that only one flush callback can be outstanding at a time;
            if another flush occurs before the previous flush's callback
            has been run, the previous callback will be discarded.
    
            .. versionchanged:: 4.0
               Now returns a `.Future` if no callback is given.
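
            A sketch of flow control from a coroutine (``produce_chunks``
            is a hypothetical generator)::

                @gen.coroutine
                def get(self):
                    for chunk in produce_chunks():
                        self.write(chunk)
                        yield self.flush()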
            """
            chunk = b"".join(self._write_buffer)
            self._write_buffer = []
            if not self._headers_written:
                self._headers_written = True
                for transform in self._transforms:
                    self._status_code, self._headers, chunk = \
                        transform.transform_first_chunk(
                            self._status_code, self._headers,
                            chunk, include_footers)
                # Ignore the chunk and only write the headers for HEAD requests
                if self.request.method == "HEAD":
                    chunk = None
    
                # Finalize the cookie headers (which have been stored in a side
                # object so an outgoing cookie could be overwritten before it
                # is sent).
                if hasattr(self, "_new_cookie"):
                    for cookie in self._new_cookie.values():
                        self.add_header("Set-Cookie", cookie.OutputString(None))
    
                start_line = httputil.ResponseStartLine('',
                                                        self._status_code,
                                                        self._reason)
                return self.request.connection.write_headers(
                    start_line, self._headers, chunk, callback=callback)
            else:
                for transform in self._transforms:
                    chunk = transform.transform_chunk(chunk, include_footers)
                # Ignore the chunk and only write the headers for HEAD requests
                if self.request.method != "HEAD":
                    return self.request.connection.write(chunk, callback=callback)
                else:
                    future = Future()
                    future.set_result(None)
                    return future
    
        def finish(self, chunk=None):
            """Finishes this response, ending the HTTP request."""
            if self._finished:
                raise RuntimeError("finish() called twice")
    
            if chunk is not None:
                self.write(chunk)
    
            # Automatically support ETags and add the Content-Length header if
            # we have not flushed any content yet.
            if not self._headers_written:
                if (self._status_code == 200 and
                    self.request.method in ("GET", "HEAD") and
                        "Etag" not in self._headers):
                    self.set_etag_header()
                    if self.check_etag_header():
                        self._write_buffer = []
                        self.set_status(304)
                if (self._status_code in (204, 304) or
                    (self._status_code >= 100 and self._status_code < 200)):
                    assert not self._write_buffer, "Cannot send body with %s" % self._status_code
                    self._clear_headers_for_304()
                elif "Content-Length" not in self._headers:
                    content_length = sum(len(part) for part in self._write_buffer)
                    self.set_header("Content-Length", content_length)
    
            if hasattr(self.request, "connection"):
                # Now that the request is finished, clear the callback we
                # set on the HTTPConnection (which would otherwise prevent the
                # garbage collection of the RequestHandler when there
                # are keepalive connections)
                self.request.connection.set_close_callback(None)
    
            self.flush(include_footers=True)
            self.request.finish()
            self._log()
            self._finished = True
            self.on_finish()
            self._break_cycles()
    
        def _break_cycles(self):
            # Break up a reference cycle between this handler and the
            # _ui_module closures to allow for faster GC on CPython.
            self.ui = None
    
        def send_error(self, status_code=500, **kwargs):
            """Sends the given HTTP error code to the browser.
    
            If `flush()` has already been called, it is not possible to send
            an error, so this method will simply terminate the response.
            If output has been written but not yet flushed, it will be discarded
            and replaced with the error page.
    
            Override `write_error()` to customize the error page that is returned.
            Additional keyword arguments are passed through to `write_error`.
            """
            if self._headers_written:
                gen_log.error("Cannot send error response after headers written")
                if not self._finished:
                    # If we get an error between writing headers and finishing,
                    # we are unlikely to be able to finish due to a
                    # Content-Length mismatch. Try anyway to release the
                    # socket.
                    try:
                        self.finish()
                    except Exception:
                        gen_log.error("Failed to flush partial response",
                                      exc_info=True)
                return
            self.clear()
    
            reason = kwargs.get('reason')
            if 'exc_info' in kwargs:
                exception = kwargs['exc_info'][1]
                if isinstance(exception, HTTPError) and exception.reason:
                    reason = exception.reason
            self.set_status(status_code, reason=reason)
            try:
                self.write_error(status_code, **kwargs)
            except Exception:
                app_log.error("Uncaught exception in write_error", exc_info=True)
            if not self._finished:
                self.finish()
    
        def write_error(self, status_code, **kwargs):
            """Override to implement custom error pages.
    
            ``write_error`` may call `write`, `render`, `set_header`, etc
            to produce output as usual.
    
            If this error was caused by an uncaught exception (including
            HTTPError), an ``exc_info`` triple will be available as
            ``kwargs["exc_info"]``.  Note that this exception may not be
            the "current" exception for purposes of methods like
            ``sys.exc_info()`` or ``traceback.format_exc``.
            """
            if self.settings.get("serve_traceback") and "exc_info" in kwargs:
                # in debug mode, try to send a traceback
                self.set_header('Content-Type', 'text/plain')
                for line in traceback.format_exception(*kwargs["exc_info"]):
                    self.write(line)
                self.finish()
            else:
                self.finish("%(code)d: %(message)s"
                            "%(code)d: %(message)s" % {
                                "code": status_code,
                                "message": self._reason,
                            })
    
        @property
        def locale(self):
            """The locale for the current session.
    
            Determined by either `get_user_locale`, which you can override to
            set the locale based on, e.g., a user preference stored in a
            database, or `get_browser_locale`, which uses the ``Accept-Language``
            header.
    
            .. versionchanged:: 4.1
               Added a property setter.
            """
            if not hasattr(self, "_locale"):
                self._locale = self.get_user_locale()
                if not self._locale:
                    self._locale = self.get_browser_locale()
                    assert self._locale
            return self._locale
    
        @locale.setter
        def locale(self, value):
            self._locale = value
    
        def get_user_locale(self):
            """Override to determine the locale from the authenticated user.
    
            If None is returned, we fall back to `get_browser_locale()`.
    
            This method should return a `tornado.locale.Locale` object,
            most likely obtained via a call like ``tornado.locale.get("en")``
            """
            return None
    
        def get_browser_locale(self, default="en_US"):
            """Determines the user's locale from ``Accept-Language`` header.
    
            See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
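
            For example, ``Accept-Language: da, en-gb;q=0.8, en;q=0.7``
            yields the candidate order ``da, en-gb, en`` (highest ``q``
            first); the first of those supported by the application wins.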
            """
            if "Accept-Language" in self.request.headers:
                languages = self.request.headers["Accept-Language"].split(",")
                locales = []
                for language in languages:
                    parts = language.strip().split(";")
                    if len(parts) > 1 and parts[1].startswith("q="):
                        try:
                            score = float(parts[1][2:])
                        except (ValueError, TypeError):
                            score = 0.0
                    else:
                        score = 1.0
                    locales.append((parts[0], score))
                if locales:
                    locales.sort(key=lambda pair: pair[1], reverse=True)
                    codes = [l[0] for l in locales]
                    return locale.get(*codes)
            return locale.get(default)
    
        @property
        def current_user(self):
            """The authenticated user for this request.
    
            This is set in one of two ways:
    
            * A subclass may override `get_current_user()`, which will be called
              automatically the first time ``self.current_user`` is accessed.
              `get_current_user()` will only be called once per request,
              and is cached for future access::
    
                  def get_current_user(self):
                      user_cookie = self.get_secure_cookie("user")
                      if user_cookie:
                          return json.loads(user_cookie)
                      return None
    
            * It may be set as a normal variable, typically from an overridden
              `prepare()`::
    
                  @gen.coroutine
                  def prepare(self):
                      user_id_cookie = self.get_secure_cookie("user_id")
                      if user_id_cookie:
                          self.current_user = yield load_user(user_id_cookie)
    
            Note that `prepare()` may be a coroutine while `get_current_user()`
            may not, so the latter form is necessary if loading the user requires
            asynchronous operations.
    
            The user object may be any type of the application's choosing.
            """
            if not hasattr(self, "_current_user"):
                self._current_user = self.get_current_user()
            return self._current_user
    
        @current_user.setter
        def current_user(self, value):
            self._current_user = value
    
        def get_current_user(self):
            """Override to determine the current user from, e.g., a cookie.
    
            This method may not be a coroutine.
            """
            return None
    
        def get_login_url(self):
            """Override to customize the login URL based on the request.
    
            By default, we use the ``login_url`` application setting.
            """
            self.require_setting("login_url", "@tornado.web.authenticated")
            return self.application.settings["login_url"]
    
        def get_template_path(self):
            """Override to customize template path for each handler.
    
            By default, we use the ``template_path`` application setting.
            Return None to load templates relative to the calling file.
            """
            return self.application.settings.get("template_path")
    
        @property
        def xsrf_token(self):
            """The XSRF-prevention token for the current user/session.
    
            To prevent cross-site request forgery, we set an '_xsrf' cookie
            and include the same '_xsrf' value as an argument with all POST
            requests. If the two do not match, we reject the form submission
            as a potential forgery.
    
            See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    
            .. versionchanged:: 3.2.2
               The xsrf token will now have a random mask applied in every
               request, which makes it safe to include the token in pages
               that are compressed.  See http://breachattack.com for more
               information on the issue fixed by this change.  Old (version 1)
               cookies will be converted to version 2 when this method is called
               unless the ``xsrf_cookie_version`` `Application` setting is
               set to 1.
    
            .. versionchanged:: 4.3
               The ``xsrf_cookie_kwargs`` `Application` setting may be
               used to supply additional cookie options (which will be
               passed directly to `set_cookie`). For example,
               ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
               will set the ``secure`` and ``httponly`` flags on the
               ``_xsrf`` cookie.
            """
            if not hasattr(self, "_xsrf_token"):
                version, token, timestamp = self._get_raw_xsrf_token()
                output_version = self.settings.get("xsrf_cookie_version", 2)
                cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
                if output_version == 1:
                    self._xsrf_token = binascii.b2a_hex(token)
                elif output_version == 2:
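                    # A version 2 token has the wire format
                    # "2|<hex mask>|<hex masked token>|<timestamp>"; the fresh
                    # random mask makes the cookie differ on every request
                    # (see the BREACH note in the docstring above).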
                    mask = os.urandom(4)
                    self._xsrf_token = b"|".join([
                        b"2",
                        binascii.b2a_hex(mask),
                        binascii.b2a_hex(_websocket_mask(mask, token)),
                        utf8(str(int(timestamp)))])
                else:
                    raise ValueError("unknown xsrf cookie version %d",
                                     output_version)
                if version is None:
                    expires_days = 30 if self.current_user else None
                    self.set_cookie("_xsrf", self._xsrf_token,
                                    expires_days=expires_days,
                                    **cookie_kwargs)
            return self._xsrf_token
    
        def _get_raw_xsrf_token(self):
            """Read or generate the xsrf token in its raw form.
    
            The raw_xsrf_token is a tuple containing:
    
            * version: the version of the cookie from which this token was read,
              or None if we generated a new token in this request.
            * token: the raw token data; random (non-ascii) bytes.
            * timestamp: the time this token was generated (will not be accurate
              for version 1 cookies)
            """
            if not hasattr(self, '_raw_xsrf_token'):
                cookie = self.get_cookie("_xsrf")
                if cookie:
                    version, token, timestamp = self._decode_xsrf_token(cookie)
                else:
                    version, token, timestamp = None, None, None
                if token is None:
                    version = None
                    token = os.urandom(16)
                    timestamp = time.time()
                self._raw_xsrf_token = (version, token, timestamp)
            return self._raw_xsrf_token
    
        def _decode_xsrf_token(self, cookie):
            """Convert a cookie string into a the tuple form returned by
            _get_raw_xsrf_token.
            """
    
            try:
                m = _signed_value_version_re.match(utf8(cookie))
    
                if m:
                    version = int(m.group(1))
                    if version == 2:
                        _, mask, masked_token, timestamp = cookie.split("|")
    
                        mask = binascii.a2b_hex(utf8(mask))
                        token = _websocket_mask(
                            mask, binascii.a2b_hex(utf8(masked_token)))
                        timestamp = int(timestamp)
                        return version, token, timestamp
                    else:
                        # Treat unknown versions as not present instead of failing.
                        raise Exception("Unknown xsrf cookie version")
                else:
                    version = 1
                    try:
                        token = binascii.a2b_hex(utf8(cookie))
                    except (binascii.Error, TypeError):
                        token = utf8(cookie)
                    # We don't have a usable timestamp in older versions.
                    timestamp = int(time.time())
                    return (version, token, timestamp)
            except Exception:
                # Catch exceptions and return nothing instead of failing.
                gen_log.debug("Uncaught exception in _decode_xsrf_token",
                              exc_info=True)
                return None, None, None
    
        def check_xsrf_cookie(self):
            """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
    
            To prevent cross-site request forgery, we set an ``_xsrf``
            cookie and include the same value as a non-cookie
            field with all ``POST`` requests. If the two do not match, we
            reject the form submission as a potential forgery.
    
            The ``_xsrf`` value may be set as either a form field named ``_xsrf``
            or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
            (the latter is accepted for compatibility with Django).
    
            See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    
            Prior to release 1.1.1, this check was ignored if the HTTP header
            ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
            has been shown to be insecure and has been removed.  For more
            information please see
            http://www.djangoproject.com/weblog/2011/feb/08/security/
            http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
    
            .. versionchanged:: 3.2.2
               Added support for cookie version 2.  Both versions 1 and 2 are
               supported.
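
            This check is enabled application-wide by the ``xsrf_cookies``
            setting; for example::

                app = Application(handlers, xsrf_cookies=True)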
            """
            token = (self.get_argument("_xsrf", None) or
                     self.request.headers.get("X-Xsrftoken") or
                     self.request.headers.get("X-Csrftoken"))
            if not token:
                raise HTTPError(403, "'_xsrf' argument missing from POST")
            _, token, _ = self._decode_xsrf_token(token)
            _, expected_token, _ = self._get_raw_xsrf_token()
            if not token:
                raise HTTPError(403, "'_xsrf' argument has invalid format")
            if not _time_independent_equals(utf8(token), utf8(expected_token)):
                raise HTTPError(403, "XSRF cookie does not match POST argument")
    
        def xsrf_form_html(self):
            """An HTML ```` element to be included with all POST forms.
    
            It defines the ``_xsrf`` input value, which we check on all POST
            requests to prevent cross-site request forgery. If you have set
            the ``xsrf_cookies`` application setting, you must include this
            HTML within all of your HTML forms.
    
            In a template, this method should be called with ``{% module
            xsrf_form_html() %}``
    
            See `check_xsrf_cookie()` above for more information.
            """
            return '<input type="hidden" name="_xsrf" value="' + \
                escape.xhtml_escape(self.xsrf_token) + '"/>'
    
        def static_url(self, path, include_host=None, **kwargs):
            """Returns a static URL for the given relative static file path.
    
            This method requires you set the ``static_path`` setting in your
            application (which specifies the root directory of your static
            files).
    
            This method returns a versioned url (by default appending
            ``?v=<signature>``), which allows the static files to be
            cached indefinitely.  This can be disabled by passing
            ``include_version=False`` (in the default implementation;
            other static file implementations are not required to support
            this, but they may support other options).
    
            By default this method returns URLs relative to the current
            host, but if ``include_host`` is true the URL returned will be
            absolute.  If this handler has an ``include_host`` attribute,
            that value will be used as the default for all `static_url`
            calls that do not pass ``include_host`` as a keyword argument.
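
            For example (a sketch; the signature value varies)::

                self.static_url("css/style.css")
                # -> "/static/css/style.css?v=0f3c1"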
    
            """
            self.require_setting("static_path", "static_url")
            get_url = self.settings.get("static_handler_class",
                                        StaticFileHandler).make_static_url
    
            if include_host is None:
                include_host = getattr(self, "include_host", False)
    
            if include_host:
                base = self.request.protocol + "://" + self.request.host
            else:
                base = ""
    
            return base + get_url(self.settings, path, **kwargs)
    
        def require_setting(self, name, feature="this feature"):
            """Raises an exception if the given app setting is not defined."""
            if not self.application.settings.get(name):
                raise Exception("You must define the '%s' setting in your "
                                "application to use %s" % (name, feature))
    
        def reverse_url(self, name, *args):
            """Alias for `Application.reverse_url`."""
            return self.application.reverse_url(name, *args)
    
        def compute_etag(self):
            """Computes the etag header to be used for this request.
    
            By default uses a hash of the content written so far.
    
            May be overridden to provide custom etag implementations,
            or may return None to disable tornado's default etag support.
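
            For example, an override keyed on a hypothetical
            ``self.item_version`` attribute instead of the output::

                def compute_etag(self):
                    return '"v%s"' % self.item_version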
            """
            hasher = hashlib.sha1()
            for part in self._write_buffer:
                hasher.update(part)
            return '"%s"' % hasher.hexdigest()
    
        def set_etag_header(self):
            """Sets the response's Etag header using ``self.compute_etag()``.
    
            Note: no header will be set if ``compute_etag()`` returns ``None``.
    
            This method is called automatically when the request is finished.
            """
            etag = self.compute_etag()
            if etag is not None:
                self.set_header("Etag", etag)
    
        def check_etag_header(self):
            """Checks the ``Etag`` header against requests's ``If-None-Match``.
    
            Returns ``True`` if the request's Etag matches and a 304 should be
            returned. For example::
    
                self.set_etag_header()
                if self.check_etag_header():
                    self.set_status(304)
                    return
    
            This method is called automatically when the request is finished,
            but may be called earlier for applications that override
            `compute_etag` and want to do an early check for ``If-None-Match``
            before completing the request.  The ``Etag`` header should be set
            (perhaps with `set_etag_header`) before calling this method.
            """
            computed_etag = utf8(self._headers.get("Etag", ""))
            # Find all weak and strong etag values from If-None-Match header
            # because RFC 7232 allows multiple etag values in a single header.
            etags = re.findall(
                br'\*|(?:W/)?"[^"]*"',
                utf8(self.request.headers.get("If-None-Match", ""))
            )
            if not computed_etag or not etags:
                return False
    
            match = False
            if etags[0] == b'*':
                match = True
            else:
                # Use a weak comparison when comparing entity-tags.
                def val(x):
                    return x[2:] if x.startswith(b'W/') else x
    
                for etag in etags:
                    if val(etag) == val(computed_etag):
                        match = True
                        break
            return match
    
        def _stack_context_handle_exception(self, type, value, traceback):
            try:
                # For historical reasons _handle_request_exception only takes
                # the exception value instead of the full triple,
                # so re-raise the exception to ensure that it's in
                # sys.exc_info()
                raise_exc_info((type, value, traceback))
            except Exception:
                self._handle_request_exception(value)
            return True
    
        @gen.coroutine
        def _execute(self, transforms, *args, **kwargs):
            """Executes this request with the given output transforms."""
            self._transforms = transforms
            try:
                if self.request.method not in self.SUPPORTED_METHODS:
                    raise HTTPError(405)
                self.path_args = [self.decode_argument(arg) for arg in args]
                self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                        for (k, v) in kwargs.items())
                # If XSRF cookies are turned on, reject form submissions without
                # the proper cookie
                if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                        self.application.settings.get("xsrf_cookies"):
                    self.check_xsrf_cookie()
    
                result = self.prepare()
                if result is not None:
                    result = yield result
                if self._prepared_future is not None:
                    # Tell the Application we've finished with prepare()
                    # and are ready for the body to arrive.
                    self._prepared_future.set_result(None)
                if self._finished:
                    return
    
                if _has_stream_request_body(self.__class__):
                    # In streaming mode request.body is a Future that signals
                    # the body has been completely received.  The Future has no
                    # result; the data has been passed to self.data_received
                    # instead.
                    try:
                        yield self.request.body
                    except iostream.StreamClosedError:
                        return
    
                method = getattr(self, self.request.method.lower())
                result = method(*self.path_args, **self.path_kwargs)
                if result is not None:
                    result = yield result
                if self._auto_finish and not self._finished:
                    self.finish()
            except Exception as e:
                try:
                    self._handle_request_exception(e)
                except Exception:
                    app_log.error("Exception in exception handler", exc_info=True)
                if (self._prepared_future is not None and
                        not self._prepared_future.done()):
                    # In case we failed before setting _prepared_future, do it
                    # now (to unblock the HTTP server).  Note that this is not
                    # in a finally block to avoid GC issues prior to Python 3.4.
                    self._prepared_future.set_result(None)
    
        def data_received(self, chunk):
            """Implement this method to handle streamed request data.
    
            Requires the `.stream_request_body` decorator.
            """
            raise NotImplementedError()
    
        def _log(self):
            """Logs the current request.
    
            Sort of deprecated since this functionality was moved to the
            Application, but left in place for the benefit of existing apps
            that have overridden this method.
            """
            self.application.log_request(self)
    
        def _request_summary(self):
            return "%s %s (%s)" % (self.request.method, self.request.uri,
                                   self.request.remote_ip)
    
        def _handle_request_exception(self, e):
            if isinstance(e, Finish):
                # Not an error; just finish the request without logging.
                if not self._finished:
                    self.finish(*e.args)
                return
            try:
                self.log_exception(*sys.exc_info())
            except Exception:
                # An error here should still get a best-effort send_error()
                # to avoid leaking the connection.
                app_log.error("Error in exception logger", exc_info=True)
            if self._finished:
                # Extra errors after the request has been finished should
                # be logged, but there is no reason to continue to try and
                # send a response.
                return
            if isinstance(e, HTTPError):
                if e.status_code not in httputil.responses and not e.reason:
                    gen_log.error("Bad HTTP status code: %d", e.status_code)
                    self.send_error(500, exc_info=sys.exc_info())
                else:
                    self.send_error(e.status_code, exc_info=sys.exc_info())
            else:
                self.send_error(500, exc_info=sys.exc_info())
    
        def log_exception(self, typ, value, tb):
            """Override to customize logging of uncaught exceptions.
    
            By default logs instances of `HTTPError` as warnings without
            stack traces (on the ``tornado.general`` logger), and all
            other exceptions as errors with stack traces (on the
            ``tornado.application`` logger).
    
            .. versionadded:: 3.1
            """
            if isinstance(value, HTTPError):
                if value.log_message:
                    format = "%d %s: " + value.log_message
                    args = ([value.status_code, self._request_summary()] +
                            list(value.args))
                    gen_log.warning(format, *args)
            else:
                app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                              self.request, exc_info=(typ, value, tb))
    
        def _ui_module(self, name, module):
            def render(*args, **kwargs):
                if not hasattr(self, "_active_modules"):
                    self._active_modules = {}
                if name not in self._active_modules:
                    self._active_modules[name] = module(self)
                rendered = self._active_modules[name].render(*args, **kwargs)
                return rendered
            return render
    
        def _ui_method(self, method):
            return lambda *args, **kwargs: method(self, *args, **kwargs)
    
        def _clear_headers_for_304(self):
            # 304 responses should not contain entity headers (defined in
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
            # not explicitly allowed by
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
            headers = ["Allow", "Content-Encoding", "Content-Language",
                       "Content-Length", "Content-MD5", "Content-Range",
                       "Content-Type", "Last-Modified"]
            for h in headers:
                self.clear_header(h)
    
    
    def asynchronous(method):
        """Wrap request handler methods with this if they are asynchronous.
    
        This decorator is for callback-style asynchronous methods; for
        coroutines, use the ``@gen.coroutine`` decorator without
        ``@asynchronous``. (It is legal for legacy reasons to use the two
        decorators together provided ``@asynchronous`` is first, but
        ``@asynchronous`` will be ignored in this case)
    
        This decorator should only be applied to the :ref:`HTTP verb
        methods <verbs>`; its behavior is undefined for any other method.
        This decorator does not *make* a method asynchronous; it tells
        the framework that the method *is* asynchronous.  For this decorator
        to be useful the method must (at least sometimes) do something
        asynchronous.
    
        If this decorator is given, the response is not finished when the
        method returns. It is up to the request handler to call
        `self.finish() <RequestHandler.finish>` to finish the HTTP
        request. Without this decorator, the request is automatically
        finished when the ``get()`` or ``post()`` method returns. Example:
    
        .. testcode::
    
           class MyRequestHandler(RequestHandler):
               @asynchronous
               def get(self):
                  http = httpclient.AsyncHTTPClient()
                  http.fetch("http://friendfeed.com/", self._on_download)
    
               def _on_download(self, response):
                  self.write("Downloaded!")
                  self.finish()
    
        .. testoutput::
           :hide:
    
        .. versionchanged:: 3.1
           The ability to use ``@gen.coroutine`` without ``@asynchronous``.
    
        .. versionchanged:: 4.3 Returning anything but ``None`` or a
           yieldable object from a method decorated with ``@asynchronous``
           is an error. Such return values were previously ignored silently.
        """
        # Delay the IOLoop import because it's not available on app engine.
        from tornado.ioloop import IOLoop
    
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            self._auto_finish = False
            with stack_context.ExceptionStackContext(
                    self._stack_context_handle_exception):
                result = method(self, *args, **kwargs)
                if result is not None:
                    result = gen.convert_yielded(result)
    
                    # If @asynchronous is used with @gen.coroutine, (but
                    # not @gen.engine), we can automatically finish the
                    # request when the future resolves.  Additionally,
                    # the Future will swallow any exceptions so we need
                    # to throw them back out to the stack context to finish
                    # the request.
                    def future_complete(f):
                        f.result()
                        if not self._finished:
                            self.finish()
                    IOLoop.current().add_future(result, future_complete)
                    # Once we have done this, hide the Future from our
                    # caller (i.e. RequestHandler._when_complete), which
                    # would otherwise set up its own callback and
                    # exception handler (resulting in exceptions being
                    # logged twice).
                    return None
                return result
        return wrapper
    
    
    def stream_request_body(cls):
        """Apply to `RequestHandler` subclasses to enable streaming body support.
    
        This decorator implies the following changes:
    
        * `.HTTPServerRequest.body` is undefined, and body arguments will not
          be included in `RequestHandler.get_argument`.
        * `RequestHandler.prepare` is called when the request headers have been
          read instead of after the entire body has been read.
        * The subclass must define a method ``data_received(self, data):``, which
          will be called zero or more times as data is available.  Note that
          if the request has an empty body, ``data_received`` may not be called.
        * ``prepare`` and ``data_received`` may return Futures (such as via
          ``@gen.coroutine``), in which case the next method will not be called
          until those futures have completed.
        * The regular HTTP method (``post``, ``put``, etc) will be called after
          the entire body has been read.
    
        See the `file receiver demo
        <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
        for example usage.
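
        A minimal sketch (``UploadHandler`` and its byte counter are
        illustrative)::

            @stream_request_body
            class UploadHandler(RequestHandler):
                def prepare(self):
                    self.bytes_read = 0

                def data_received(self, chunk):
                    self.bytes_read += len(chunk)

                def put(self):
                    self.write("read %d bytes" % self.bytes_read)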
        """
        if not issubclass(cls, RequestHandler):
            raise TypeError("expected subclass of RequestHandler, got %r" % cls)
        cls._stream_request_body = True
        return cls
    
    
    def _has_stream_request_body(cls):
        if not issubclass(cls, RequestHandler):
            raise TypeError("expected subclass of RequestHandler, got %r" % cls)
        return getattr(cls, '_stream_request_body', False)
    
    
    def removeslash(method):
        """Use this decorator to remove trailing slashes from the request path.
    
        For example, a request to ``/foo/`` would redirect to ``/foo`` with this
        decorator. Your request handler mapping should use a regular expression
        like ``r'/foo/*'`` in conjunction with using the decorator.
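
        For example (``MyFooHandler`` is illustrative)::

            application = web.Application([
                (r'/foo/*', MyFooHandler),
            ])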
        """
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if self.request.path.endswith("/"):
                if self.request.method in ("GET", "HEAD"):
                    uri = self.request.path.rstrip("/")
                    if uri:  # don't try to redirect '/' to ''
                        if self.request.query:
                            uri += "?" + self.request.query
                        self.redirect(uri, permanent=True)
                        return
                else:
                    raise HTTPError(404)
            return method(self, *args, **kwargs)
        return wrapper
    
    
    def addslash(method):
        """Use this decorator to add a missing trailing slash to the request path.
    
        For example, a request to ``/foo`` would redirect to ``/foo/`` with this
        decorator. Your request handler mapping should use a regular expression
        like ``r'/foo/?'`` in conjunction with using the decorator.
        """
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if not self.request.path.endswith("/"):
                if self.request.method in ("GET", "HEAD"):
                    uri = self.request.path + "/"
                    if self.request.query:
                        uri += "?" + self.request.query
                    self.redirect(uri, permanent=True)
                    return
                raise HTTPError(404)
            return method(self, *args, **kwargs)
        return wrapper
    
    
    class _ApplicationRouter(ReversibleRuleRouter):
        """Routing implementation used internally by `Application`.
    
        Provides a binding between `Application` and `RequestHandler`.
        This implementation extends `~.routing.ReversibleRuleRouter` in two ways:
            * it allows using a `RequestHandler` subclass as a `~.routing.Rule` target, and
            * it allows using a list/tuple of rules as a `~.routing.Rule` target;
              the ``process_rule`` implementation substitutes such a list with an
              appropriate `_ApplicationRouter` instance.
        """
    
        def __init__(self, application, rules=None):
            assert isinstance(application, Application)
            self.application = application
            super(_ApplicationRouter, self).__init__(rules)
    
        def process_rule(self, rule):
            rule = super(_ApplicationRouter, self).process_rule(rule)
    
            if isinstance(rule.target, (list, tuple)):
                rule.target = _ApplicationRouter(self.application, rule.target)
    
            return rule
    
        def get_target_delegate(self, target, request, **target_params):
            if isclass(target) and issubclass(target, RequestHandler):
                return self.application.get_handler_delegate(request, target, **target_params)
    
            return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
    
    
    class Application(ReversibleRouter):
        """A collection of request handlers that make up a web application.
    
        Instances of this class are callable and can be passed directly to
        HTTPServer to serve the application::
    
            application = web.Application([
                (r"/", MainPageHandler),
            ])
            http_server = httpserver.HTTPServer(application)
            http_server.listen(8080)
            ioloop.IOLoop.current().start()
    
        The constructor for this class takes in a list of `~.routing.Rule`
        objects or tuples of values corresponding to the arguments of
        `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
        the values in square brackets being optional. The default matcher is
        `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
        instead of ``(PathMatches(regexp), target)``.
    
        A common routing target is a `RequestHandler` subclass, but you can also
        use lists of rules as a target, which create a nested routing configuration::
    
            application = web.Application([
                (HostMatches("example.com"), [
                    (r"/", MainPageHandler),
                    (r"/feed", FeedHandler),
                ]),
            ])
    
        In addition to this you can use nested `~.routing.Router` instances,
        `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
        (see `~.routing` module docs for more information).
    
        When we receive requests, we iterate over the list in order and
        instantiate the first request class whose regexp matches the
        request path. The request class can be specified as either a
        class object or a (fully-qualified) name.
    
        A dictionary may be passed as the third element (``target_kwargs``)
        of the tuple, which will be used as keyword arguments to the handler's
        constructor and `~RequestHandler.initialize` method. This pattern
        is used for the `StaticFileHandler` in this example (note that a
        `StaticFileHandler` can be installed automatically with the
        static_path setting described below)::
    
            application = web.Application([
                (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
            ])
    
        We support virtual hosts with the `add_handlers` method, which takes in
        a host regular expression as the first argument::
    
            application.add_handlers(r"www\.myhost\.com", [
                (r"/article/([0-9]+)", ArticleHandler),
            ])
    
        If there's no match for the current request's host, then the
        ``default_host`` parameter value is matched against host
        regular expressions.
    
        You can serve static files by sending the ``static_path`` setting
        as a keyword argument. We will serve those files from the
        ``/static/`` URI (this is configurable with the
        ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
        and ``/robots.txt`` from the same directory.  A custom subclass of
        `StaticFileHandler` can be specified with the
        ``static_handler_class`` setting.
    
        .. versionchanged:: 4.5
           Integration with the new `tornado.routing` module.
        """
        def __init__(self, handlers=None, default_host=None, transforms=None,
                     **settings):
            if transforms is None:
                self.transforms = []
                if settings.get("compress_response") or settings.get("gzip"):
                    self.transforms.append(GZipContentEncoding)
            else:
                self.transforms = transforms
            self.default_host = default_host
            self.settings = settings
            self.ui_modules = {'linkify': _linkify,
                               'xsrf_form_html': _xsrf_form_html,
                               'Template': TemplateModule,
                               }
            self.ui_methods = {}
            self._load_ui_modules(settings.get("ui_modules", {}))
            self._load_ui_methods(settings.get("ui_methods", {}))
            if self.settings.get("static_path"):
                path = self.settings["static_path"]
                handlers = list(handlers or [])
                static_url_prefix = settings.get("static_url_prefix",
                                                 "/static/")
                static_handler_class = settings.get("static_handler_class",
                                                    StaticFileHandler)
                static_handler_args = settings.get("static_handler_args", {})
                static_handler_args['path'] = path
                for pattern in [re.escape(static_url_prefix) + r"(.*)",
                                r"/(favicon\.ico)", r"/(robots\.txt)"]:
                    handlers.insert(0, (pattern, static_handler_class,
                                        static_handler_args))
    
            if self.settings.get('debug'):
                self.settings.setdefault('autoreload', True)
                self.settings.setdefault('compiled_template_cache', False)
                self.settings.setdefault('static_hash_cache', False)
                self.settings.setdefault('serve_traceback', True)
    
            self.wildcard_router = _ApplicationRouter(self, handlers)
            self.default_router = _ApplicationRouter(self, [
                Rule(AnyMatches(), self.wildcard_router)
            ])
    
            # Automatically reload modified modules
            if self.settings.get('autoreload'):
                from tornado import autoreload
                autoreload.start()
    
        def listen(self, port, address="", **kwargs):
            """Starts an HTTP server for this application on the given port.
    
            This is a convenience alias for creating an `.HTTPServer`
            object and calling its listen method.  Keyword arguments not
            supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
            `.HTTPServer` constructor.  For advanced uses
            (e.g. multi-process mode), do not use this method; create an
            `.HTTPServer` and call its
            `.TCPServer.bind`/`.TCPServer.start` methods directly.
    
            Note that after calling this method you still need to call
            ``IOLoop.current().start()`` to start the server.
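
            For example, a minimal sketch (``MainHandler`` is hypothetical)::

                app = Application([(r"/", MainHandler)])
                app.listen(8888)              # bind and start accepting
                IOLoop.current().start()      # run the event loop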
    
            Returns the `.HTTPServer` object.
    
            .. versionchanged:: 4.3
               Now returns the `.HTTPServer` object.
            """
            # import is here rather than top level because HTTPServer
            # is not importable on appengine
            from tornado.httpserver import HTTPServer
            server = HTTPServer(self, **kwargs)
            server.listen(port, address)
            return server
    
        def add_handlers(self, host_pattern, host_handlers):
            """Appends the given handlers to our handler list.
    
            Host patterns are processed sequentially in the order they were
            added. All matching patterns will be considered.
            """
            host_matcher = HostMatches(host_pattern)
            rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
    
            self.default_router.rules.insert(-1, rule)
    
            if self.default_host is not None:
                self.wildcard_router.add_rules([(
                    DefaultHostMatches(self, host_matcher.host_pattern),
                    host_handlers
                )])
    
        def add_transform(self, transform_class):
            self.transforms.append(transform_class)
    
        def _load_ui_methods(self, methods):
            if isinstance(methods, types.ModuleType):
                self._load_ui_methods(dict((n, getattr(methods, n))
                                           for n in dir(methods)))
            elif isinstance(methods, list):
                for m in methods:
                    self._load_ui_methods(m)
            else:
                for name, fn in methods.items():
                    if not name.startswith("_") and hasattr(fn, "__call__") \
                            and name[0].lower() == name[0]:
                        self.ui_methods[name] = fn
    
        def _load_ui_modules(self, modules):
            if isinstance(modules, types.ModuleType):
                self._load_ui_modules(dict((n, getattr(modules, n))
                                           for n in dir(modules)))
            elif isinstance(modules, list):
                for m in modules:
                    self._load_ui_modules(m)
            else:
                assert isinstance(modules, dict)
                for name, cls in modules.items():
                    try:
                        if issubclass(cls, UIModule):
                            self.ui_modules[name] = cls
                    except TypeError:
                        pass
    
        def __call__(self, request):
            # Legacy HTTPServer interface
            dispatcher = self.find_handler(request)
            return dispatcher.execute()
    
        def find_handler(self, request, **kwargs):
            route = self.default_router.find_handler(request)
            if route is not None:
                return route
    
            if self.settings.get('default_handler_class'):
                return self.get_handler_delegate(
                    request,
                    self.settings['default_handler_class'],
                    self.settings.get('default_handler_args', {}))
    
            return self.get_handler_delegate(
                request, ErrorHandler, {'status_code': 404})
    
        def get_handler_delegate(self, request, target_class, target_kwargs=None,
                                 path_args=None, path_kwargs=None):
            """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
            for application and `RequestHandler` subclass.
    
            :arg httputil.HTTPServerRequest request: current HTTP request.
            :arg RequestHandler target_class: a `RequestHandler` class.
            :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
            :arg list path_args: positional arguments for ``target_class`` HTTP method that
                will be executed while handling a request (``get``, ``post`` or any other).
            :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
            """
            return _HandlerDelegate(
                self, request, target_class, target_kwargs, path_args, path_kwargs)
    
        def reverse_url(self, name, *args):
            """Returns a URL path for handler named ``name``
    
            The handler must be added to the application as a named `URLSpec`.
    
            Args will be substituted for capturing groups in the `URLSpec` regex.
            They will be converted to strings if necessary, encoded as utf8,
            and url-escaped.
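
            For example, with a handler registered under a name (the handler
            and pattern are illustrative)::

                app = Application([
                    url(r"/user/([0-9]+)", UserHandler, name="user"),
                ])
                app.reverse_url("user", 42)  # -> "/user/42"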
            """
            reversed_url = self.default_router.reverse_url(name, *args)
            if reversed_url is not None:
                return reversed_url
    
            raise KeyError("%s not found in named urls" % name)
    
        def log_request(self, handler):
            """Writes a completed HTTP request to the logs.
    
            By default writes to the ``tornado.access`` logger.  To change
            this behavior either subclass Application and override this method,
            or pass a function in the application settings dictionary as
            ``log_function``.
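
            For example, a sketch of a custom ``log_function``::

                def log_function(handler):
                    # Log only the status code and URI.
                    access_log.info("%d %s", handler.get_status(),
                                    handler.request.uri)

                app = Application(handlers, log_function=log_function)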
            """
            if "log_function" in self.settings:
                self.settings["log_function"](handler)
                return
            if handler.get_status() < 400:
                log_method = access_log.info
            elif handler.get_status() < 500:
                log_method = access_log.warning
            else:
                log_method = access_log.error
            request_time = 1000.0 * handler.request.request_time()
            log_method("%d %s %.2fms", handler.get_status(),
                       handler._request_summary(), request_time)
    
    
    class _HandlerDelegate(httputil.HTTPMessageDelegate):
        def __init__(self, application, request, handler_class, handler_kwargs,
                     path_args, path_kwargs):
            self.application = application
            self.connection = request.connection
            self.request = request
            self.handler_class = handler_class
            self.handler_kwargs = handler_kwargs or {}
            self.path_args = path_args or []
            self.path_kwargs = path_kwargs or {}
            self.chunks = []
            self.stream_request_body = _has_stream_request_body(self.handler_class)
    
        def headers_received(self, start_line, headers):
            if self.stream_request_body:
                self.request.body = Future()
                return self.execute()
    
        def data_received(self, data):
            if self.stream_request_body:
                return self.handler.data_received(data)
            else:
                self.chunks.append(data)
    
        def finish(self):
            if self.stream_request_body:
                self.request.body.set_result(None)
            else:
                self.request.body = b''.join(self.chunks)
                self.request._parse_body()
                self.execute()
    
        def on_connection_close(self):
            if self.stream_request_body:
                self.handler.on_connection_close()
            else:
                self.chunks = None
    
        def execute(self):
            # If template cache is disabled (usually in the debug mode),
            # re-compile templates and reload static files on every
            # request so you don't need to restart to see changes
            if not self.application.settings.get("compiled_template_cache", True):
                with RequestHandler._template_loader_lock:
                    for loader in RequestHandler._template_loaders.values():
                        loader.reset()
            if not self.application.settings.get('static_hash_cache', True):
                StaticFileHandler.reset()
    
            self.handler = self.handler_class(self.application, self.request,
                                              **self.handler_kwargs)
            transforms = [t(self.request) for t in self.application.transforms]
    
            if self.stream_request_body:
                self.handler._prepared_future = Future()
            # Note that if an exception escapes handler._execute it will be
            # trapped in the Future it returns (which we are ignoring here,
            # leaving it to be logged when the Future is GC'd).
            # However, that shouldn't happen because _execute has a blanket
            # except handler, and we cannot easily access the IOLoop here to
            # call add_future (because of the requirement to remain compatible
            # with WSGI)
            self.handler._execute(transforms, *self.path_args,
                                  **self.path_kwargs)
            # If we are streaming the request body, then execute() is finished
            # when the handler has prepared to receive the body.  If not,
            # it doesn't matter when execute() finishes (so we return None)
            return self.handler._prepared_future
    
    
    class HTTPError(Exception):
        """An exception that will turn into an HTTP error response.
    
        Raising an `HTTPError` is a convenient alternative to calling
        `RequestHandler.send_error` since it automatically ends the
        current function.
    
        To customize the response sent with an `HTTPError`, override
        `RequestHandler.write_error`.
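
        For example, a minimal sketch inside a handler method (the argument
        name is illustrative)::

            if not self.get_argument("id", None):
                raise HTTPError(400)  # becomes a "400: Bad Request" response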
    
        :arg int status_code: HTTP status code.  Must be listed in
            `httplib.responses <http.client.responses>` unless the ``reason``
            keyword argument is given.
        :arg string log_message: Message to be written to the log for this error
            (will not be shown to the user unless the `Application` is in debug
            mode).  May contain ``%s``-style placeholders, which will be filled
            in with remaining positional parameters.
        :arg string reason: Keyword-only argument.  The HTTP "reason" phrase
            to pass in the status line along with ``status_code``.  Normally
            determined automatically from ``status_code``, but can be set
            explicitly when using a non-standard numeric code.
        """
        def __init__(self, status_code=500, log_message=None, *args, **kwargs):
            self.status_code = status_code
            self.log_message = log_message
            self.args = args
            self.reason = kwargs.get('reason', None)
            if log_message and not args:
                self.log_message = log_message.replace('%', '%%')
    
        def __str__(self):
            message = "HTTP %d: %s" % (
                self.status_code,
                self.reason or httputil.responses.get(self.status_code, 'Unknown'))
            if self.log_message:
                return message + " (" + (self.log_message % self.args) + ")"
            else:
                return message
    
    
    class Finish(Exception):
        """An exception that ends the request without producing an error response.
    
        When `Finish` is raised in a `RequestHandler`, the request will
        end (calling `RequestHandler.finish` if it hasn't already been
        called), but the error-handling methods (including
        `RequestHandler.write_error`) will not be called.
    
        If `Finish()` was created with no arguments, the pending response
        will be sent as-is. If `Finish()` was given an argument, that
        argument will be passed to `RequestHandler.finish()`.
    
        This can be a more convenient way to implement custom error pages
        than overriding ``write_error`` (especially in library code)::
    
            if self.current_user is None:
                self.set_status(401)
                self.set_header('WWW-Authenticate', 'Basic realm="something"')
                raise Finish()
    
        .. versionchanged:: 4.3
           Arguments passed to ``Finish()`` will be passed on to
           `RequestHandler.finish`.
        """
        pass
    
    
    class MissingArgumentError(HTTPError):
        """Exception raised by `RequestHandler.get_argument`.
    
        This is a subclass of `HTTPError`, so if it is uncaught a 400 response
        code will be used instead of 500 (and a stack trace will not be logged).
    
        .. versionadded:: 3.1
        """
        def __init__(self, arg_name):
            super(MissingArgumentError, self).__init__(
                400, 'Missing argument %s' % arg_name)
            self.arg_name = arg_name
    
    
    class ErrorHandler(RequestHandler):
        """Generates an error response with ``status_code`` for all requests."""
        def initialize(self, status_code):
            self.set_status(status_code)
    
        def prepare(self):
            raise HTTPError(self._status_code)
    
        def check_xsrf_cookie(self):
            # POSTs to an ErrorHandler don't actually have side effects,
            # so we don't need to check the xsrf token.  This allows POSTs
            # to the wrong url to return a 404 instead of 403.
            pass
    
    
    class RedirectHandler(RequestHandler):
        """Redirects the client to the given URL for all GET requests.
    
        You should provide the keyword argument ``url`` to the handler, e.g.::
    
            application = web.Application([
                (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
            ])
    
        `RedirectHandler` supports regular expression substitutions. E.g., to
        swap the first and second parts of a path while preserving the remainder::
    
            application = web.Application([
                (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
            ])
    
        The final URL is formatted with `str.format` and the substrings that match
        the capturing groups. In the above example, a request to "/a/b/c" would be
        formatted like::
    
            str.format("/{1}/{0}/{2}", "a", "b", "c")  # -> "/b/a/c"
    
        Use Python's :ref:`format string syntax <formatstrings>` to customize how
        values are substituted.
    
        .. versionchanged:: 4.5
           Added support for substitutions into the destination URL.
        """
        def initialize(self, url, permanent=True):
            self._url = url
            self._permanent = permanent
    
        def get(self, *args):
            self.redirect(self._url.format(*args), permanent=self._permanent)
    
    
    class StaticFileHandler(RequestHandler):
        """A simple handler that can serve static content from a directory.
    
        A `StaticFileHandler` is configured automatically if you pass the
        ``static_path`` keyword argument to `Application`.  This handler
        can be customized with the ``static_url_prefix``, ``static_handler_class``,
        and ``static_handler_args`` settings.
    
        To map an additional path to this handler for a static data directory
        you would add a line to your application like::
    
            application = web.Application([
                (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
            ])
    
        The handler constructor requires a ``path`` argument, which specifies the
        local root directory of the content to be served.
    
        Note that a capture group in the regex is required to parse the value for
        the ``path`` argument to the get() method (different than the constructor
        argument above); see `URLSpec` for details.
    
        To serve a file like ``index.html`` automatically when a directory is
        requested, set ``static_handler_args=dict(default_filename="index.html")``
        in your application settings, or add ``default_filename`` as an initializer
        argument for your ``StaticFileHandler``.
    
        To maximize the effectiveness of browser caching, this class supports
        versioned urls (by default using the argument ``?v=``).  If a version
        is given, we instruct the browser to cache this file indefinitely.
        `make_static_url` (also available as `RequestHandler.static_url`) can
        be used to construct a versioned url.
    
        This handler is intended primarily for use in development and light-duty
        file serving; for heavy traffic it will be more efficient to use
        a dedicated static file server (such as nginx or Apache).  We support
        the HTTP ``Accept-Ranges`` mechanism to return partial content (because
        some browsers require this functionality to be present to seek in
        HTML5 audio or video).
    
        **Subclassing notes**
    
        This class is designed to be extensible by subclassing, but because
        of the way static urls are generated with class methods rather than
        instance methods, the inheritance patterns are somewhat unusual.
        Be sure to use the ``@classmethod`` decorator when overriding a
        class method.  Instance methods may use the attributes ``self.path``,
        ``self.absolute_path``, and ``self.modified``.
    
        Subclasses should only override methods discussed in this section;
        overriding other methods is error-prone.  Overriding
        ``StaticFileHandler.get`` is particularly problematic due to the
        tight coupling with ``compute_etag`` and other methods.
    
        To change the way static urls are generated (e.g. to match the behavior
        of another server or CDN), override `make_static_url`, `parse_url_path`,
        `get_cache_time`, and/or `get_version`.
    
        To replace all interaction with the filesystem (e.g. to serve
        static content from a database), override `get_content`,
        `get_content_size`, `get_modified_time`, `get_absolute_path`, and
        `validate_absolute_path`.
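
        For example, a sketch of a subclass that points static urls at a
        CDN (the class name and domain are hypothetical)::

            class CDNStaticFileHandler(StaticFileHandler):
                @classmethod
                def make_static_url(cls, settings, path, include_version=True):
                    # Same signature as the base class method.
                    return "https://cdn.example.com/" + path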
    
        .. versionchanged:: 3.1
           Many of the methods for subclasses were added in Tornado 3.1.
        """
        CACHE_MAX_AGE = 86400 * 365 * 10  # 10 years
    
        _static_hashes = {}  # type: typing.Dict
        _lock = threading.Lock()  # protects _static_hashes
    
        def initialize(self, path, default_filename=None):
            self.root = path
            self.default_filename = default_filename
    
        @classmethod
        def reset(cls):
            with cls._lock:
                cls._static_hashes = {}
    
        def head(self, path):
            return self.get(path, include_body=False)
    
        @gen.coroutine
        def get(self, path, include_body=True):
            # Set up our path instance variables.
            self.path = self.parse_url_path(path)
            del path  # make sure we don't refer to path instead of self.path again
            absolute_path = self.get_absolute_path(self.root, self.path)
            self.absolute_path = self.validate_absolute_path(
                self.root, absolute_path)
            if self.absolute_path is None:
                return
    
            self.modified = self.get_modified_time()
            self.set_headers()
    
            if self.should_return_304():
                self.set_status(304)
                return
    
            request_range = None
            range_header = self.request.headers.get("Range")
            if range_header:
                # As per RFC 2616 14.16, if an invalid Range header is specified,
                # the request will be treated as if the header didn't exist.
                request_range = httputil._parse_request_range(range_header)
    
            size = self.get_content_size()
            if request_range:
                start, end = request_range
                if (start is not None and start >= size) or end == 0:
                    # As per RFC 2616 14.35.1, a range is not satisfiable
                    # only if the first requested byte is equal to or greater
                    # than the content length, or when a suffix with length 0
                    # is specified.
                    self.set_status(416)  # Range Not Satisfiable
                    self.set_header("Content-Type", "text/plain")
                    self.set_header("Content-Range", "bytes */%s" % (size, ))
                    return
                if start is not None and start < 0:
                    start += size
                if end is not None and end > size:
                    # Clients sometimes blindly use a large range to limit their
                    # download size; cap the endpoint at the actual file size.
                    end = size
                # Note: only return HTTP 206 if less than the entire range has been
                # requested. Not only is this semantically correct, but Chrome
                # refuses to play audio if it gets an HTTP 206 in response to
                # ``Range: bytes=0-``.
                if size != (end or size) - (start or 0):
                    self.set_status(206)  # Partial Content
                    self.set_header("Content-Range",
                                    httputil._get_content_range(start, end, size))
            else:
                start = end = None
    
            if start is not None and end is not None:
                content_length = end - start
            elif end is not None:
                content_length = end
            elif start is not None:
                content_length = size - start
            else:
                content_length = size
            self.set_header("Content-Length", content_length)
    
            if include_body:
                content = self.get_content(self.absolute_path, start, end)
                if isinstance(content, bytes):
                    content = [content]
                for chunk in content:
                    try:
                        self.write(chunk)
                        yield self.flush()
                    except iostream.StreamClosedError:
                        return
            else:
                assert self.request.method == "HEAD"
    
        def compute_etag(self):
            """Sets the ``Etag`` header based on static url version.
    
            This allows efficient ``If-None-Match`` checks against cached
            versions, and sends the correct ``Etag`` for a partial response
            (i.e. the same ``Etag`` as the full file).
    
            .. versionadded:: 3.1
            """
            version_hash = self._get_cached_version(self.absolute_path)
            if not version_hash:
                return None
            return '"%s"' % (version_hash, )
    
        def set_headers(self):
            """Sets the content and caching headers on the response.
    
            .. versionadded:: 3.1
            """
            self.set_header("Accept-Ranges", "bytes")
            self.set_etag_header()
    
            if self.modified is not None:
                self.set_header("Last-Modified", self.modified)
    
            content_type = self.get_content_type()
            if content_type:
                self.set_header("Content-Type", content_type)
    
            cache_time = self.get_cache_time(self.path, self.modified,
                                             content_type)
            if cache_time > 0:
                self.set_header("Expires", datetime.datetime.utcnow() +
                                datetime.timedelta(seconds=cache_time))
                self.set_header("Cache-Control", "max-age=" + str(cache_time))
    
            self.set_extra_headers(self.path)
    
        def should_return_304(self):
            """Returns True if the headers indicate that we should return 304.
    
            .. versionadded:: 3.1
            """
            if self.check_etag_header():
                return True
    
            # Check the If-Modified-Since, and don't send the result if the
            # content has not been modified
            ims_value = self.request.headers.get("If-Modified-Since")
            if ims_value is not None:
                date_tuple = email.utils.parsedate(ims_value)
                if date_tuple is not None:
                    if_since = datetime.datetime(*date_tuple[:6])
                    if if_since >= self.modified:
                        return True
    
            return False
    
        @classmethod
        def get_absolute_path(cls, root, path):
            """Returns the absolute location of ``path`` relative to ``root``.
    
            ``root`` is the path configured for this `StaticFileHandler`
            (in most cases the ``static_path`` `Application` setting).
    
            This class method may be overridden in subclasses.  By default
            it returns a filesystem path, but other strings may be used
            as long as they are unique and understood by the subclass's
            overridden `get_content`.
    
            .. versionadded:: 3.1
            """
            abspath = os.path.abspath(os.path.join(root, path))
            return abspath
    
        def validate_absolute_path(self, root, absolute_path):
            """Validate and return the absolute path.
    
            ``root`` is the configured path for the `StaticFileHandler`,
            and ``path`` is the result of `get_absolute_path`
    
            This is an instance method called during request processing,
            so it may raise `HTTPError` or use methods like
            `RequestHandler.redirect` (return None after redirecting to
            halt further processing).  This is where 404 errors for missing files
            are generated.
    
            This method may modify the path before returning it, but note that
            any such modifications will not be understood by `make_static_url`.
    
            In instance methods, this method's result is available as
            ``self.absolute_path``.
    
            .. versionadded:: 3.1
            """
            # os.path.abspath strips a trailing /.
            # We must add it back to `root` so that we only match files
            # in a directory named `root` instead of files starting with
            # that prefix.
            root = os.path.abspath(root)
            if not root.endswith(os.path.sep):
                # abspath always removes a trailing slash, except when
                # root is '/'. This is an unusual case, but several projects
                # have independently discovered this technique to disable
                # Tornado's path validation and (hopefully) do their own,
                # so we need to support it.
                root += os.path.sep
            # The trailing slash also needs to be temporarily added back
            # the requested path so a request to root/ will match.
            if not (absolute_path + os.path.sep).startswith(root):
                raise HTTPError(403, "%s is not in root static directory",
                                self.path)
            if (os.path.isdir(absolute_path) and
                    self.default_filename is not None):
                # need to look at the request.path here for when path is empty
                # but there is some prefix to the path that was already
                # trimmed by the routing
                if not self.request.path.endswith("/"):
                    self.redirect(self.request.path + "/", permanent=True)
                    return
                absolute_path = os.path.join(absolute_path, self.default_filename)
            if not os.path.exists(absolute_path):
                raise HTTPError(404)
            if not os.path.isfile(absolute_path):
                raise HTTPError(403, "%s is not a file", self.path)
            return absolute_path
    
        @classmethod
        def get_content(cls, abspath, start=None, end=None):
            """Retrieve the content of the requested resource which is located
            at the given absolute path.
    
            This class method may be overridden by subclasses.  Note that its
            signature is different from other overridable class methods
            (no ``settings`` argument); this is deliberate to ensure that
            ``abspath`` is able to stand on its own as a cache key.
    
            This method should either return a byte string or an iterator
            of byte strings.  The latter is preferred for large files
            as it helps reduce memory fragmentation.
    
            .. versionadded:: 3.1
            """
            with open(abspath, "rb") as file:
                if start is not None:
                    file.seek(start)
                if end is not None:
                    remaining = end - (start or 0)
                else:
                    remaining = None
                while True:
                    chunk_size = 64 * 1024
                    if remaining is not None and remaining < chunk_size:
                        chunk_size = remaining
                    chunk = file.read(chunk_size)
                    if chunk:
                        if remaining is not None:
                            remaining -= len(chunk)
                        yield chunk
                    else:
                        if remaining is not None:
                            assert remaining == 0
                        return
    
        @classmethod
        def get_content_version(cls, abspath):
            """Returns a version string for the resource at the given path.
    
            This class method may be overridden by subclasses.  The
            default implementation is a hash of the file's contents.
    
            .. versionadded:: 3.1
            """
            data = cls.get_content(abspath)
            hasher = hashlib.md5()
            if isinstance(data, bytes):
                hasher.update(data)
            else:
                for chunk in data:
                    hasher.update(chunk)
            return hasher.hexdigest()
    
        def _stat(self):
            if not hasattr(self, '_stat_result'):
                self._stat_result = os.stat(self.absolute_path)
            return self._stat_result
    
        def get_content_size(self):
            """Retrieve the total size of the resource at the given path.
    
            This method may be overridden by subclasses.
    
            .. versionadded:: 3.1
    
            .. versionchanged:: 4.0
               This method is now always called, instead of only when
               partial results are requested.
            """
            stat_result = self._stat()
            return stat_result[stat.ST_SIZE]
    
        def get_modified_time(self):
            """Returns the time that ``self.absolute_path`` was last modified.
    
            May be overridden in subclasses.  Should return a `~datetime.datetime`
            object or None.
    
            .. versionadded:: 3.1
            """
            stat_result = self._stat()
            modified = datetime.datetime.utcfromtimestamp(
                stat_result[stat.ST_MTIME])
            return modified
    
        def get_content_type(self):
            """Returns the ``Content-Type`` header to be used for this request.
    
            .. versionadded:: 3.1
            """
            mime_type, encoding = mimetypes.guess_type(self.absolute_path)
            # per RFC 6713, use the appropriate type for a gzip compressed file
            if encoding == "gzip":
                return "application/gzip"
            # As of 2015-07-21 there is no bzip2 encoding defined at
            # http://www.iana.org/assignments/media-types/media-types.xhtml
            # So for that (and any other encoding), use octet-stream.
            elif encoding is not None:
                return "application/octet-stream"
            elif mime_type is not None:
                return mime_type
            # if mime_type not detected, use application/octet-stream
            else:
                return "application/octet-stream"
    
        def set_extra_headers(self, path):
            """For subclass to add extra headers to the response"""
            pass
    
        def get_cache_time(self, path, modified, mime_type):
            """Override to customize cache control behavior.
    
            Return a positive number of seconds to make the result
            cacheable for that amount of time or 0 to mark the resource
            as cacheable for an unspecified amount of time (subject to
            browser heuristics).

            By default returns a cache expiry of 10 years for resources
            requested with a ``v`` argument.
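
            For example, a sketch that caches every response for one hour::

                def get_cache_time(self, path, modified, mime_type):
                    return 3600  # seconds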
            """
            return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
    
        @classmethod
        def make_static_url(cls, settings, path, include_version=True):
            """Constructs a versioned url for the given path.
    
            This method may be overridden in subclasses (but note that it
            is a class method rather than an instance method).  Subclasses
            are only required to implement the signature
            ``make_static_url(cls, settings, path)``; other keyword
            arguments may be passed through `~RequestHandler.static_url`
            but are not standard.
    
            ``settings`` is the `Application.settings` dictionary.  ``path``
            is the static path being requested.  The url returned should be
            relative to the current host.
    
            ``include_version`` determines whether the generated URL should
            include the query string containing the version hash of the
            file corresponding to the given ``path``.
    
            """
            url = settings.get('static_url_prefix', '/static/') + path
            if not include_version:
                return url
    
            version_hash = cls.get_version(settings, path)
            if not version_hash:
                return url
    
            return '%s?v=%s' % (url, version_hash)
    
        def parse_url_path(self, url_path):
            """Converts a static URL path into a filesystem path.
    
            ``url_path`` is the path component of the URL with
            ``static_url_prefix`` removed.  The return value should be a
            filesystem path relative to ``static_path``.
    
            This is the inverse of `make_static_url`.
            """
            if os.path.sep != "/":
                url_path = url_path.replace("/", os.path.sep)
            return url_path
    
        @classmethod
        def get_version(cls, settings, path):
            """Generate the version string to be used in static URLs.
    
            ``settings`` is the `Application.settings` dictionary and ``path``
            is the relative location of the requested asset on the filesystem.
            The returned value should be a string, or ``None`` if no version
            could be determined.
    
            .. versionchanged:: 3.1
               This method was previously recommended for subclasses to override;
               `get_content_version` is now preferred as it allows the base
               class to handle caching of the result.
            """
            abs_path = cls.get_absolute_path(settings['static_path'], path)
            return cls._get_cached_version(abs_path)
    
        @classmethod
        def _get_cached_version(cls, abs_path):
            with cls._lock:
                hashes = cls._static_hashes
                if abs_path not in hashes:
                    try:
                        hashes[abs_path] = cls.get_content_version(abs_path)
                    except Exception:
                        gen_log.error("Could not open static file %r", abs_path)
                        hashes[abs_path] = None
                hsh = hashes.get(abs_path)
                if hsh:
                    return hsh
            return None
    
    
    class FallbackHandler(RequestHandler):
        """A `RequestHandler` that wraps another HTTP server callback.
    
        The fallback is a callable object that accepts an
        `~.httputil.HTTPServerRequest`, such as an `Application` or
        `tornado.wsgi.WSGIContainer`.  This is most useful when running
        both Tornado ``RequestHandlers`` and WSGI apps in the same server.
        Typical usage::
    
            wsgi_app = tornado.wsgi.WSGIContainer(
                django.core.handlers.wsgi.WSGIHandler())
            application = tornado.web.Application([
                (r"/foo", FooHandler),
                (r".*", FallbackHandler, dict(fallback=wsgi_app),
            ])
        """
        def initialize(self, fallback):
            self.fallback = fallback
    
        def prepare(self):
            self.fallback(self.request)
            self._finished = True
    
    
    class OutputTransform(object):
        """A transform modifies the result of an HTTP request (e.g., GZip encoding)
    
        Applications are not expected to create their own OutputTransforms
        or interact with them directly; the framework chooses which transforms
        (if any) to apply.
        """
        def __init__(self, request):
            pass
    
        def transform_first_chunk(self, status_code, headers, chunk, finishing):
            # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
            return status_code, headers, chunk
    
        def transform_chunk(self, chunk, finishing):
            return chunk
    
    
    class GZipContentEncoding(OutputTransform):
        """Applies the gzip content encoding to the response.
    
        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    
        .. versionchanged:: 4.0
            Now compresses all mime types beginning with ``text/``, instead
            of just a whitelist. (The whitelist is still used for certain
            non-text mime types.)
        """
        # Whitelist of compressible mime types (in addition to any types
        # beginning with "text/").
        CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
                             "application/xml", "application/atom+xml",
                             "application/json", "application/xhtml+xml",
                             "image/svg+xml"])
        # Python's GzipFile defaults to level 9, while most other gzip
        # tools (including gzip itself) default to 6, which is probably a
        # better CPU/size tradeoff.
        GZIP_LEVEL = 6
        # Responses that are too short are unlikely to benefit from gzipping
        # after considering the "Content-Encoding: gzip" header and the header
        # inside the gzip encoding.
        # Note that responses written in multiple chunks will be compressed
        # regardless of size.
        MIN_LENGTH = 1024
    
        def __init__(self, request):
            self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    
        def _compressible_type(self, ctype):
            return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
    
        def transform_first_chunk(self, status_code, headers, chunk, finishing):
            # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
            # TODO: can/should this type be inherited from the superclass?
            if 'Vary' in headers:
                headers['Vary'] += ', Accept-Encoding'
            else:
                headers['Vary'] = 'Accept-Encoding'
            if self._gzipping:
                ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
                self._gzipping = self._compressible_type(ctype) and \
                    (not finishing or len(chunk) >= self.MIN_LENGTH) and \
                    ("Content-Encoding" not in headers)
            if self._gzipping:
                headers["Content-Encoding"] = "gzip"
                self._gzip_value = BytesIO()
                self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
                                                compresslevel=self.GZIP_LEVEL)
                chunk = self.transform_chunk(chunk, finishing)
                if "Content-Length" in headers:
                    # The original content length is no longer correct.
                    # If this is the last (and only) chunk, we can set the new
                    # content-length; otherwise we remove it and fall back to
                    # chunked encoding.
                    if finishing:
                        headers["Content-Length"] = str(len(chunk))
                    else:
                        del headers["Content-Length"]
            return status_code, headers, chunk
    
        def transform_chunk(self, chunk, finishing):
            if self._gzipping:
                self._gzip_file.write(chunk)
                if finishing:
                    self._gzip_file.close()
                else:
                    self._gzip_file.flush()
                chunk = self._gzip_value.getvalue()
                self._gzip_value.truncate(0)
                self._gzip_value.seek(0)
            return chunk
    
    
    def authenticated(method):
        """Decorate methods with this to require that the user be logged in.
    
        If the user is not logged in, they will be redirected to the configured
        `login url <RequestHandler.get_login_url>`.
    
        If you configure a login url with a query parameter, Tornado will
        assume you know what you're doing and use it as-is.  If not, it
        will add a `next` parameter so the login page knows where to send
        you once you're logged in.
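
        Usage, as a minimal sketch (``ProfileHandler`` is hypothetical)::

            class ProfileHandler(RequestHandler):
                @authenticated
                def get(self):
                    # Only reached when current_user is set.
                    self.write("Hello %s" % self.current_user)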
        """
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if not self.current_user:
                if self.request.method in ("GET", "HEAD"):
                    url = self.get_login_url()
                    if "?" not in url:
                        if urlparse.urlsplit(url).scheme:
                            # if login url is absolute, make next absolute too
                            next_url = self.request.full_url()
                        else:
                            next_url = self.request.uri
                        url += "?" + urlencode(dict(next=next_url))
                    self.redirect(url)
                    return
                raise HTTPError(403)
            return method(self, *args, **kwargs)
        return wrapper
    
    
    class UIModule(object):
        """A re-usable, modular UI unit on a page.
    
        UI modules often execute additional queries, and they can include
        additional CSS and JavaScript that will be automatically inserted
        into the output page when it is rendered.
    
        Subclasses of UIModule must override the `render` method.
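
        A minimal sketch (the module and template names are illustrative)::

            class Entry(UIModule):
                def render(self, entry, show_comments=False):
                    return self.render_string(
                        "module-entry.html", entry=entry,
                        show_comments=show_comments)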
        """
        def __init__(self, handler):
            self.handler = handler
            self.request = handler.request
            self.ui = handler.ui
            self.locale = handler.locale
    
        @property
        def current_user(self):
            return self.handler.current_user
    
        def render(self, *args, **kwargs):
            """Override in subclasses to return this module's output."""
            raise NotImplementedError()
    
        def embedded_javascript(self):
            """Override to return a JavaScript string
            to be embedded in the page."""
            return None
    
        def javascript_files(self):
            """Override to return a list of JavaScript files needed by this module.
    
            If the return values are relative paths, they will be passed to
            `RequestHandler.static_url`; otherwise they will be used as-is.
            """
            return None
    
        def embedded_css(self):
            """Override to return a CSS string
            that will be embedded in the page."""
            return None
    
        def css_files(self):
            """Override to returns a list of CSS files required by this module.
    
            If the return values are relative paths, they will be passed to
            `RequestHandler.static_url`; otherwise they will be used as-is.
            """
            return None
    
        def html_head(self):
            """Override to return an HTML string that will be put in the 
            element.
            """
            return None
    
        def html_body(self):
            """Override to return an HTML string that will be put at the end of
            the  element.
            """
            return None
    
        def render_string(self, path, **kwargs):
            """Renders a template and returns it as a string."""
            return self.handler.render_string(path, **kwargs)
    
    
    class _linkify(UIModule):
        def render(self, text, **kwargs):
            return escape.linkify(text, **kwargs)
    
    
    class _xsrf_form_html(UIModule):
        def render(self):
            return self.handler.xsrf_form_html()
    
    
    class TemplateModule(UIModule):
        """UIModule that simply renders the given template.
    
        {% module Template("foo.html") %} is similar to {% include "foo.html" %},
        but the module version gets its own namespace (with kwargs passed to
        Template()) instead of inheriting the outer template's namespace.
    
        Templates rendered through this module also get access to UIModule's
        automatic javascript/css features.  Simply call set_resources
        inside the template and give it keyword arguments corresponding to
        the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
        Note that these resources are output once per template file, not once
        per instantiation of the template, so they must not depend on
        any arguments to the template.
        """
        def __init__(self, handler):
            super(TemplateModule, self).__init__(handler)
            # keep resources in both a list and a dict to preserve order
            self._resource_list = []
            self._resource_dict = {}
    
        def render(self, path, **kwargs):
            def set_resources(**kwargs):
                if path not in self._resource_dict:
                    self._resource_list.append(kwargs)
                    self._resource_dict[path] = kwargs
                else:
                    if self._resource_dict[path] != kwargs:
                        raise ValueError("set_resources called with different "
                                         "resources for the same template")
                return ""
            return self.render_string(path, set_resources=set_resources,
                                      **kwargs)
    
        def _get_resources(self, key):
            return (r[key] for r in self._resource_list if key in r)
    
        def embedded_javascript(self):
            return "\n".join(self._get_resources("embedded_javascript"))
    
        def javascript_files(self):
            result = []
            for f in self._get_resources("javascript_files"):
                if isinstance(f, (unicode_type, bytes)):
                    result.append(f)
                else:
                    result.extend(f)
            return result
    
        def embedded_css(self):
            return "\n".join(self._get_resources("embedded_css"))
    
        def css_files(self):
            result = []
            for f in self._get_resources("css_files"):
                if isinstance(f, (unicode_type, bytes)):
                    result.append(f)
                else:
                    result.extend(f)
            return result
    
        def html_head(self):
            return "".join(self._get_resources("html_head"))
    
        def html_body(self):
            return "".join(self._get_resources("html_body"))
    
    
    class _UIModuleNamespace(object):
        """Lazy namespace which creates UIModule proxies bound to a handler."""
        def __init__(self, handler, ui_modules):
            self.handler = handler
            self.ui_modules = ui_modules
    
        def __getitem__(self, key):
            return self.handler._ui_module(key, self.ui_modules[key])
    
        def __getattr__(self, key):
            try:
                return self[key]
            except KeyError as e:
                raise AttributeError(str(e))
    
    
    if hasattr(hmac, 'compare_digest'):  # python 3.3
        _time_independent_equals = hmac.compare_digest
    else:
        def _time_independent_equals(a, b):
            if len(a) != len(b):
                return False
            result = 0
            if isinstance(a[0], int):  # python3 byte strings
                for x, y in zip(a, b):
                    result |= x ^ y
            else:  # python2
                for x, y in zip(a, b):
                    result |= ord(x) ^ ord(y)
            return result == 0
    
    
    def create_signed_value(secret, name, value, version=None, clock=None,
                            key_version=None):
        if version is None:
            version = DEFAULT_SIGNED_VALUE_VERSION
        if clock is None:
            clock = time.time
    
        timestamp = utf8(str(int(clock())))
        value = base64.b64encode(utf8(value))
        if version == 1:
            signature = _create_signature_v1(secret, name, value, timestamp)
            value = b"|".join([value, timestamp, signature])
            return value
        elif version == 2:
            # The v2 format consists of a version number and a series of
            # length-prefixed fields "%d:%s", the last of which is a
            # signature, all separated by pipes.  All numbers are in
            # decimal format with no leading zeros.  The signature is an
            # HMAC-SHA256 of the whole string up to that point, including
            # the final pipe.
            #
            # The fields are:
            # - format version (i.e. 2; no length prefix)
            # - key version (integer, default is 0)
            # - timestamp (integer seconds since epoch)
            # - name (not encoded; assumed to be ~alphanumeric)
            # - value (base64-encoded)
            # - signature (hex-encoded; no length prefix)
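            #
            # For example, a sketch of a complete signed value (values are
            # illustrative; the hex signature is elided):
            #
            #   2|1:0|10:1300000000|3:foo|8:YmFyCg==|<hex signature>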
            def format_field(s):
                return utf8("%d:" % len(s)) + utf8(s)
            to_sign = b"|".join([
                b"2",
                format_field(str(key_version or 0)),
                format_field(timestamp),
                format_field(name),
                format_field(value),
                b''])
    
            if isinstance(secret, dict):
                assert key_version is not None, 'Key version must be set when sign key dict is used'
                assert version >= 2, 'Version must be at least 2 for key version support'
                secret = secret[key_version]
    
            signature = _create_signature_v2(secret, to_sign)
            return to_sign + signature
        else:
            raise ValueError("Unsupported version %d" % version)
    
    
    # A leading version number in decimal
    # with no leading zeros, followed by a pipe.
    _signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
    
    
    def _get_version(value):
        # Figures out what version value is.  Version 1 did not include an
        # explicit version field and started with arbitrary base64 data,
        # which makes this tricky.
        m = _signed_value_version_re.match(value)
        if m is None:
            version = 1
        else:
            try:
                version = int(m.group(1))
                if version > 999:
                    # Certain payloads from the version-less v1 format may
                    # be parsed as valid integers.  Due to base64 padding
                    # restrictions, this can only happen for numbers whose
                    # length is a multiple of 4, so we can treat all
                    # numbers up to 999 as versions, and for the rest we
                    # fall back to v1 format.
                    version = 1
            except ValueError:
                version = 1
        return version
    
    
    def decode_signed_value(secret, name, value, max_age_days=31,
                            clock=None, min_version=None):
        if clock is None:
            clock = time.time
        if min_version is None:
            min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
        if min_version > 2:
            raise ValueError("Unsupported min_version %d" % min_version)
        if not value:
            return None
    
        value = utf8(value)
        version = _get_version(value)
    
        if version < min_version:
            return None
        if version == 1:
            return _decode_signed_value_v1(secret, name, value,
                                           max_age_days, clock)
        elif version == 2:
            return _decode_signed_value_v2(secret, name, value,
                                           max_age_days, clock)
        else:
            return None
    
    
    def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
        parts = utf8(value).split(b"|")
        if len(parts) != 3:
            return None
        signature = _create_signature_v1(secret, name, parts[0], parts[1])
        if not _time_independent_equals(parts[2], signature):
            gen_log.warning("Invalid cookie signature %r", value)
            return None
        timestamp = int(parts[1])
        if timestamp < clock() - max_age_days * 86400:
            gen_log.warning("Expired cookie %r", value)
            return None
        if timestamp > clock() + 31 * 86400:
            # _cookie_signature does not hash a delimiter between the
            # parts of the cookie, so an attacker could transfer trailing
            # digits from the payload to the timestamp without altering the
            # signature.  For backwards compatibility, sanity-check timestamp
            # here instead of modifying _cookie_signature.
            gen_log.warning("Cookie timestamp in future; possible tampering %r",
                            value)
            return None
        if parts[1].startswith(b"0"):
            gen_log.warning("Tampered cookie %r", value)
            return None
        try:
            return base64.b64decode(parts[0])
        except Exception:
            return None
    
    
    def _decode_fields_v2(value):
        def _consume_field(s):
            length, _, rest = s.partition(b':')
            n = int(length)
            field_value = rest[:n]
            # In python 3, indexing bytes returns small integers; we must
            # use a slice to get a byte string as in python 2.
            if rest[n:n + 1] != b'|':
                raise ValueError("malformed v2 signed value field")
            rest = rest[n + 1:]
            return field_value, rest
    
        rest = value[2:]  # remove version number
        key_version, rest = _consume_field(rest)
        timestamp, rest = _consume_field(rest)
        name_field, rest = _consume_field(rest)
        value_field, passed_sig = _consume_field(rest)
        return int(key_version), timestamp, name_field, value_field, passed_sig
    
    
    def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
        try:
            key_version, timestamp, name_field, value_field, passed_sig = \
                _decode_fields_v2(value)
        except ValueError:
            return None
        signed_string = value[:-len(passed_sig)]
    
        if isinstance(secret, dict):
            try:
                secret = secret[key_version]
            except KeyError:
                return None
    
        expected_sig = _create_signature_v2(secret, signed_string)
        if not _time_independent_equals(passed_sig, expected_sig):
            return None
        if name_field != utf8(name):
            return None
        timestamp = int(timestamp)
        if timestamp < clock() - max_age_days * 86400:
            # The signature has expired.
            return None
        try:
            return base64.b64decode(value_field)
        except Exception:
            return None
    
    
    def get_signature_key_version(value):
        value = utf8(value)
        version = _get_version(value)
        if version < 2:
            return None
        try:
            key_version, _, _, _, _ = _decode_fields_v2(value)
        except ValueError:
            return None
    
        return key_version
    
    
    def _create_signature_v1(secret, *parts):
        hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
        for part in parts:
            hash.update(utf8(part))
        return utf8(hash.hexdigest())
    
    
    def _create_signature_v2(secret, s):
        hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
        hash.update(utf8(s))
        return utf8(hash.hexdigest())
    
    
    def is_absolute(path):
        return any(path.startswith(x) for x in ["/", "http:", "https:"])
    tornado-4.5.3/tornado/websocket.py

    """Implementation of the WebSocket protocol.
    
    `WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
    communication between the browser and server.
    
    WebSockets are supported in the current versions of all major browsers,
    although older versions that do not support WebSockets are still in use
    (refer to http://caniuse.com/websockets for details).
    
    This module implements the final version of the WebSocket protocol as
    defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_.  Certain
    browser versions (notably Safari 5.x) implemented an earlier draft of
    the protocol (known as "draft 76") and are not compatible with this module.
    
    .. versionchanged:: 4.0
       Removed support for the draft 76 protocol version.
    """
    
    from __future__ import absolute_import, division, print_function
    # Author: Jacob Kristhammar, 2010
    
    import base64
    import collections
    import hashlib
    import os
    import struct
    import tornado.escape
    import tornado.web
    import zlib
    
    from tornado.concurrent import TracebackFuture
    from tornado.escape import utf8, native_str, to_unicode
    from tornado import gen, httpclient, httputil
    from tornado.ioloop import IOLoop, PeriodicCallback
    from tornado.iostream import StreamClosedError
    from tornado.log import gen_log, app_log
    from tornado import simple_httpclient
    from tornado.tcpclient import TCPClient
    from tornado.util import _websocket_mask, PY3
    
    if PY3:
        from urllib.parse import urlparse  # py3
        xrange = range
    else:
        from urlparse import urlparse  # py2
    
    
    class WebSocketError(Exception):
        pass
    
    
    class WebSocketClosedError(WebSocketError):
        """Raised by operations on a closed connection.
    
        .. versionadded:: 3.2
        """
        pass
    
    
    class WebSocketHandler(tornado.web.RequestHandler):
        """Subclass this class to create a basic WebSocket handler.
    
        Override `on_message` to handle incoming messages, and use
        `write_message` to send messages to the client. You can also
        override `open` and `on_close` to handle opened and closed
        connections.
    
        Custom upgrade response headers can be sent by overriding
        `~tornado.web.RequestHandler.set_default_headers` or
        `~tornado.web.RequestHandler.prepare`.
    
        See http://dev.w3.org/html5/websockets/ for details on the
        JavaScript interface.  The protocol is specified at
        http://tools.ietf.org/html/rfc6455.
    
        Here is an example WebSocket handler that echoes all received messages
        back to the client:
    
        .. testcode::
    
          class EchoWebSocket(tornado.websocket.WebSocketHandler):
              def open(self):
                  print("WebSocket opened")
    
              def on_message(self, message):
                  self.write_message(u"You said: " + message)
    
              def on_close(self):
                  print("WebSocket closed")
    
        .. testoutput::
           :hide:
    
        WebSockets are not standard HTTP connections. The "handshake" is
        HTTP, but after the handshake, the protocol is
        message-based. Consequently, most of the Tornado HTTP facilities
        are not available in handlers of this type. The only communication
        methods available to you are `write_message()`, `ping()`, and
        `close()`. Likewise, your request handler class should implement the
        `open()` method rather than ``get()`` or ``post()``.
    
        If you map the handler above to ``/websocket`` in your application, you can
        invoke it in JavaScript with::
    
          var ws = new WebSocket("ws://localhost:8888/websocket");
          ws.onopen = function() {
             ws.send("Hello, world");
          };
          ws.onmessage = function (evt) {
             alert(evt.data);
          };
    
        This script pops up an alert box that says "You said: Hello, world".
    
        Web browsers allow any site to open a websocket connection to any other,
        instead of using the same-origin policy that governs other network
        access from javascript.  This can be surprising and is a potential
        security hole, so since Tornado 4.0 `WebSocketHandler` requires
        applications that wish to receive cross-origin websockets to opt in
        by overriding the `~WebSocketHandler.check_origin` method (see that
        method's docs for details).  Failure to do so is the most likely
        cause of 403 errors when making a websocket connection.
    
        When using a secure websocket connection (``wss://``) with a self-signed
        certificate, the connection from a browser may fail because it wants
        to show the "accept this certificate" dialog but has nowhere to show it.
        You must first visit a regular HTML page using the same certificate
        to accept it before the websocket connection will succeed.
    
        If the application setting ``websocket_ping_interval`` has a non-zero
        value, a ping will be sent periodically, and the connection will be
        closed if a response is not received before the ``websocket_ping_timeout``.
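
        For example, a minimal sketch enabling keep-alive pings::

            app = tornado.web.Application(
                [(r"/ws", EchoWebSocket)],
                websocket_ping_interval=10,
                websocket_ping_timeout=30,
            )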
    
        Messages larger than the ``websocket_max_message_size`` application setting
        (default 10MiB) will not be accepted.
    
        .. versionchanged:: 4.5
           Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
           ``websocket_max_message_size``.
        """
        def __init__(self, application, request, **kwargs):
            super(WebSocketHandler, self).__init__(application, request, **kwargs)
            self.ws_connection = None
            self.close_code = None
            self.close_reason = None
            self.stream = None
            self._on_close_called = False
    
        @tornado.web.asynchronous
        def get(self, *args, **kwargs):
            self.open_args = args
            self.open_kwargs = kwargs
    
            # Upgrade header should be present and should be equal to WebSocket
            if self.request.headers.get("Upgrade", "").lower() != 'websocket':
                self.set_status(400)
                log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
                self.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            # Connection header should be upgrade.
            # Some proxy servers/load balancers
            # might mess with it.
            headers = self.request.headers
            connection = map(lambda s: s.strip().lower(),
                             headers.get("Connection", "").split(","))
            if 'upgrade' not in connection:
                self.set_status(400)
                log_msg = "\"Connection\" must be \"Upgrade\"."
                self.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            # Handle WebSocket Origin naming convention differences
            # The difference between version 8 and 13 is that in 8 the
            # client sends a "Sec-Websocket-Origin" header and in 13 it's
            # simply "Origin".
            if "Origin" in self.request.headers:
                origin = self.request.headers.get("Origin")
            else:
                origin = self.request.headers.get("Sec-Websocket-Origin", None)
    
            # If there was an origin header, check to make sure it matches
            # according to check_origin. When the origin is None, we assume it
            # did not come from a browser and that it can be passed on.
            if origin is not None and not self.check_origin(origin):
                self.set_status(403)
                log_msg = "Cross origin websockets not allowed"
                self.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            self.ws_connection = self.get_websocket_protocol()
            if self.ws_connection:
                self.ws_connection.accept_connection()
            else:
                self.set_status(426, "Upgrade Required")
                self.set_header("Sec-WebSocket-Version", "7, 8, 13")
                self.finish()
    
        stream = None
    
        @property
        def ping_interval(self):
            """The interval for websocket keep-alive pings.
    
            Set ``websocket_ping_interval = 0`` to disable pings.
            """
            return self.settings.get('websocket_ping_interval', None)
    
        @property
        def ping_timeout(self):
            """If no ping is received in this many seconds,
            close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
            Default is max of 3 pings or 30 seconds.
            """
            return self.settings.get('websocket_ping_timeout', None)
    
        @property
        def max_message_size(self):
            """Maximum allowed message size.
    
            If the remote peer sends a message larger than this, the connection
            will be closed.
    
            Default is 10MiB.
            """
            return self.settings.get('websocket_max_message_size', None)
    
        def write_message(self, message, binary=False):
            """Sends the given message to the client of this Web Socket.
    
            The message may be either a string or a dict (which will be
            encoded as json).  If the ``binary`` argument is false, the
            message will be sent as utf8; in binary mode any byte string
            is allowed.
    
            If the connection is already closed, raises `WebSocketClosedError`.
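
            For example, a hedged sketch inside a handler method::

                def on_message(self, message):
                    # dicts are JSON-encoded before sending
                    self.write_message({"echo": message})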
    
            .. versionchanged:: 3.2
               `WebSocketClosedError` was added (previously a closed connection
               would raise an `AttributeError`)
    
            .. versionchanged:: 4.3
               Returns a `.Future` which can be used for flow control.
            """
            if self.ws_connection is None:
                raise WebSocketClosedError()
            if isinstance(message, dict):
                message = tornado.escape.json_encode(message)
            return self.ws_connection.write_message(message, binary=binary)
    
        def select_subprotocol(self, subprotocols):
            """Invoked when a new WebSocket requests specific subprotocols.
    
            ``subprotocols`` is a list of strings identifying the
            subprotocols proposed by the client.  This method may be
            overridden to return one of those strings to select it, or
            ``None`` to not select a subprotocol.  Failure to select a
            subprotocol does not automatically abort the connection,
            although clients may close the connection if none of their
            proposed subprotocols was selected.
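
            A hedged sketch preferring a hypothetical "chat.v2" protocol::

                def select_subprotocol(self, subprotocols):
                    if "chat.v2" in subprotocols:
                        return "chat.v2"
                    return None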
            """
            return None
    
        def get_compression_options(self):
            """Override to return compression options for the connection.
    
            If this method returns None (the default), compression will
            be disabled.  If it returns a dict (even an empty one), it
            will be enabled.  The contents of the dict may be used to
            control the following compression options:
    
            ``compression_level`` specifies the compression level.
    
            ``mem_level`` specifies the amount of memory used for the internal compression state.
    
            These parameters are documented in detail here:
            https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
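
            For example, a hedged sketch (values are illustrative)::

                def get_compression_options(self):
                    # moderate compression, smaller per-connection state
                    return {"compression_level": 6, "mem_level": 5}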
    
            .. versionadded:: 4.1
    
            .. versionchanged:: 4.5
    
               Added ``compression_level`` and ``mem_level``.
            """
            # TODO: Add wbits option.
            return None
    
        def open(self, *args, **kwargs):
            """Invoked when a new WebSocket is opened.
    
            The arguments to `open` are extracted from the `tornado.web.URLSpec`
            regular expression, just like the arguments to
            `tornado.web.RequestHandler.get`.
            """
            pass
    
        def on_message(self, message):
            """Handle incoming messages on the WebSocket
    
            This method must be overridden.
    
            .. versionchanged:: 4.5
    
               ``on_message`` can be a coroutine.
            """
            raise NotImplementedError
    
        def ping(self, data):
            """Send ping frame to the remote end."""
            if self.ws_connection is None:
                raise WebSocketClosedError()
            self.ws_connection.write_ping(data)
    
        def on_pong(self, data):
            """Invoked when the response to a ping frame is received."""
            pass
    
        def on_ping(self, data):
            """Invoked when the a ping frame is received."""
            pass
    
        def on_close(self):
            """Invoked when the WebSocket is closed.
    
            If the connection was closed cleanly and a status code or reason
            phrase was supplied, these values will be available as the attributes
            ``self.close_code`` and ``self.close_reason``.
    
            .. versionchanged:: 4.0
    
               Added ``close_code`` and ``close_reason`` attributes.
            """
            pass
    
        def close(self, code=None, reason=None):
            """Closes this Web Socket.
    
            Once the close handshake is successful the socket will be closed.
    
            ``code`` may be a numeric status code, taken from the values
            defined in `RFC 6455 section 7.4.1
            <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
            ``reason`` may be a textual message about why the connection is
            closing.  These values are made available to the client, but are
            not otherwise interpreted by the websocket protocol.
    
            .. versionchanged:: 4.0
    
               Added the ``code`` and ``reason`` arguments.
            """
            if self.ws_connection:
                self.ws_connection.close(code, reason)
                self.ws_connection = None
    
        def check_origin(self, origin):
            """Override to enable support for allowing alternate origins.
    
            The ``origin`` argument is the value of the ``Origin`` HTTP
            header, the url responsible for initiating this request.  This
            method is not called for clients that do not send this header;
            such requests are always allowed (because all browsers that
            implement WebSockets support this header, and non-browser
            clients do not have the same cross-site security concerns).
    
            Should return True to accept the request or False to reject it.
            By default, rejects all requests with an origin on a host other
            than this one.
    
            This is a security protection against cross site scripting attacks on
            browsers, since WebSockets are allowed to bypass the usual same-origin
            policies and don't use CORS headers.
    
            .. warning::
    
               This is an important security measure; don't disable it
               without understanding the security implications. In
               particular, if your authentication is cookie-based, you
               must either restrict the origins allowed by
               ``check_origin()`` or implement your own XSRF-like
               protection for websocket connections. See `these
               <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
               `articles
               <https://devcenter.heroku.com/articles/websocket-security>`_
               for more.
    
            To accept all cross-origin traffic (which was the default prior to
            Tornado 4.0), simply override this method to always return true::
    
                def check_origin(self, origin):
                    return True
    
            To allow connections from any subdomain of your site, you might
            do something like::
    
                def check_origin(self, origin):
                    parsed_origin = urllib.parse.urlparse(origin)
                    return parsed_origin.netloc.endswith(".mydomain.com")
    
            .. versionadded:: 4.0
    
            """
            parsed_origin = urlparse(origin)
            origin = parsed_origin.netloc
            origin = origin.lower()
    
            host = self.request.headers.get("Host")
    
            # Check to see that origin matches host directly, including ports
            return origin == host
    
        def set_nodelay(self, value):
            """Set the no-delay flag for this stream.
    
            By default, small messages may be delayed and/or combined to minimize
            the number of packets sent.  This can sometimes cause 200-500ms delays
            due to the interaction between Nagle's algorithm and TCP delayed
            ACKs.  To reduce this delay (at the expense of possibly increasing
            bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
            connection is established.
    
            See `.BaseIOStream.set_nodelay` for additional details.
    
            .. versionadded:: 3.1
            """
            self.stream.set_nodelay(value)
    
        def on_connection_close(self):
            if self.ws_connection:
                self.ws_connection.on_connection_close()
                self.ws_connection = None
            if not self._on_close_called:
                self._on_close_called = True
                self.on_close()
                self._break_cycles()
    
        def _break_cycles(self):
            # WebSocketHandlers call finish() early, but we don't want to
            # break up reference cycles (which makes it impossible to call
            # self.render_string) until after we've really closed the
            # connection (if it was established in the first place,
            # indicated by status code 101).
            if self.get_status() != 101 or self._on_close_called:
                super(WebSocketHandler, self)._break_cycles()
    
        def send_error(self, *args, **kwargs):
            if self.stream is None:
                super(WebSocketHandler, self).send_error(*args, **kwargs)
            else:
                # If we get an uncaught exception during the handshake,
                # we have no choice but to abruptly close the connection.
                # TODO: for uncaught exceptions after the handshake,
                # we can close the connection more gracefully.
                self.stream.close()
    
        def get_websocket_protocol(self):
            websocket_version = self.request.headers.get("Sec-WebSocket-Version")
            if websocket_version in ("7", "8", "13"):
                return WebSocketProtocol13(
                    self, compression_options=self.get_compression_options())
    
        def _attach_stream(self):
            self.stream = self.request.connection.detach()
            self.stream.set_close_callback(self.on_connection_close)
            # disable non-WS methods
            for method in ["write", "redirect", "set_header", "set_cookie",
                           "set_status", "flush", "finish"]:
                setattr(self, method, _raise_not_supported_for_websockets)
    
    
    def _raise_not_supported_for_websockets(*args, **kwargs):
        raise RuntimeError("Method not supported for Web Sockets")
    
    
    class WebSocketProtocol(object):
        """Base class for WebSocket protocol versions.
        """
        def __init__(self, handler):
            self.handler = handler
            self.request = handler.request
            self.stream = handler.stream
            self.client_terminated = False
            self.server_terminated = False
    
        def _run_callback(self, callback, *args, **kwargs):
            """Runs the given callback with exception handling.
    
            If the callback is a coroutine, returns its Future. On error, aborts the
            websocket connection and returns None.
            """
            try:
                result = callback(*args, **kwargs)
            except Exception:
                app_log.error("Uncaught exception in %s",
                              getattr(self.request, 'path', None), exc_info=True)
                self._abort()
            else:
                if result is not None:
                    result = gen.convert_yielded(result)
                    self.stream.io_loop.add_future(result, lambda f: f.result())
                return result
    
        def on_connection_close(self):
            self._abort()
    
        def _abort(self):
            """Instantly aborts the WebSocket connection by closing the socket"""
            self.client_terminated = True
            self.server_terminated = True
            self.stream.close()  # forcibly tear down the connection
            self.close()  # let the subclass cleanup
    
    
    class _PerMessageDeflateCompressor(object):
        def __init__(self, persistent, max_wbits, compression_options=None):
            if max_wbits is None:
                max_wbits = zlib.MAX_WBITS
            # There is no symbolic constant for the minimum wbits value.
            if not (8 <= max_wbits <= zlib.MAX_WBITS):
                raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
                                 max_wbits, zlib.MAX_WBITS)
            self._max_wbits = max_wbits
    
            if compression_options is None or 'compression_level' not in compression_options:
                self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
            else:
                self._compression_level = compression_options['compression_level']
    
            if compression_options is None or 'mem_level' not in compression_options:
                self._mem_level = 8
            else:
                self._mem_level = compression_options['mem_level']
    
            if persistent:
                self._compressor = self._create_compressor()
            else:
                self._compressor = None
    
        def _create_compressor(self):
            return zlib.compressobj(self._compression_level, zlib.DEFLATED,
                                    -self._max_wbits, self._mem_level)
    
        def compress(self, data):
            compressor = self._compressor or self._create_compressor()
            data = (compressor.compress(data) +
                    compressor.flush(zlib.Z_SYNC_FLUSH))
            assert data.endswith(b'\x00\x00\xff\xff')
            return data[:-4]
    
    
    class _PerMessageDeflateDecompressor(object):
        def __init__(self, persistent, max_wbits, compression_options=None):
            if max_wbits is None:
                max_wbits = zlib.MAX_WBITS
            if not (8 <= max_wbits <= zlib.MAX_WBITS):
                raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
                                 max_wbits, zlib.MAX_WBITS)
            self._max_wbits = max_wbits
            if persistent:
                self._decompressor = self._create_decompressor()
            else:
                self._decompressor = None
    
        def _create_decompressor(self):
            return zlib.decompressobj(-self._max_wbits)
    
        def decompress(self, data):
            decompressor = self._decompressor or self._create_decompressor()
            return decompressor.decompress(data + b'\x00\x00\xff\xff')
    
    
    class WebSocketProtocol13(WebSocketProtocol):
        """Implementation of the WebSocket protocol from RFC 6455.
    
        This class supports versions 7 and 8 of the protocol in addition to the
        final version 13.
        """
        # Bit masks for the first byte of a frame.
        FIN = 0x80
        RSV1 = 0x40
        RSV2 = 0x20
        RSV3 = 0x10
        RSV_MASK = RSV1 | RSV2 | RSV3
        OPCODE_MASK = 0x0f
    
        def __init__(self, handler, mask_outgoing=False,
                     compression_options=None):
            WebSocketProtocol.__init__(self, handler)
            self.mask_outgoing = mask_outgoing
            self._final_frame = False
            self._frame_opcode = None
            self._masked_frame = None
            self._frame_mask = None
            self._frame_length = None
            self._fragmented_message_buffer = None
            self._fragmented_message_opcode = None
            self._waiting = None
            self._compression_options = compression_options
            self._decompressor = None
            self._compressor = None
            self._frame_compressed = None
            # The total uncompressed size of all messages received or sent.
            # Unicode messages are encoded to utf8.
            # Only for testing; subject to change.
            self._message_bytes_in = 0
            self._message_bytes_out = 0
            # The total size of all packets received or sent.  Includes
            # the effect of compression, frame overhead, and control frames.
            self._wire_bytes_in = 0
            self._wire_bytes_out = 0
            self.ping_callback = None
            self.last_ping = 0
            self.last_pong = 0
    
        def accept_connection(self):
            try:
                self._handle_websocket_headers()
            except ValueError:
                self.handler.set_status(400)
                log_msg = "Missing/Invalid WebSocket headers"
                self.handler.finish(log_msg)
                gen_log.debug(log_msg)
                return
    
            try:
                self._accept_connection()
            except ValueError:
                gen_log.debug("Malformed WebSocket request received",
                              exc_info=True)
                self._abort()
                return
    
        def _handle_websocket_headers(self):
            """Verifies all invariant- and required headers
    
            If a header is missing or have an incorrect value ValueError will be
            raised
            """
            fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
            if not all(map(lambda f: self.request.headers.get(f), fields)):
                raise ValueError("Missing/Invalid WebSocket headers")
    
        @staticmethod
        def compute_accept_value(key):
            """Computes the value for the Sec-WebSocket-Accept header,
            given the value for Sec-WebSocket-Key.
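
            For example, the sample key from RFC 6455 section 1.3::

                WebSocketProtocol13.compute_accept_value(
                    "dGhlIHNhbXBsZSBub25jZQ==")
                # -> 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='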
            """
            sha1 = hashlib.sha1()
            sha1.update(utf8(key))
            sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
            return native_str(base64.b64encode(sha1.digest()))
    
        def _challenge_response(self):
            return WebSocketProtocol13.compute_accept_value(
                self.request.headers.get("Sec-Websocket-Key"))
    
        def _accept_connection(self):
            subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
            subprotocols = [s.strip() for s in subprotocols.split(',')]
            if subprotocols:
                selected = self.handler.select_subprotocol(subprotocols)
                if selected:
                    assert selected in subprotocols
                    self.handler.set_header("Sec-WebSocket-Protocol", selected)
    
            extensions = self._parse_extensions_header(self.request.headers)
            for ext in extensions:
                if (ext[0] == 'permessage-deflate' and
                        self._compression_options is not None):
                    # TODO: negotiate parameters if compression_options
                    # specifies limits.
                    self._create_compressors('server', ext[1], self._compression_options)
                    if ('client_max_window_bits' in ext[1] and
                            ext[1]['client_max_window_bits'] is None):
                        # Don't echo an offered client_max_window_bits
                        # parameter with no value.
                        del ext[1]['client_max_window_bits']
                    self.handler.set_header("Sec-WebSocket-Extensions",
                                            httputil._encode_header(
                                                'permessage-deflate', ext[1]))
                    break
    
            self.handler.clear_header("Content-Type")
            self.handler.set_status(101)
            self.handler.set_header("Upgrade", "websocket")
            self.handler.set_header("Connection", "Upgrade")
            self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
            self.handler.finish()
    
            self.handler._attach_stream()
            self.stream = self.handler.stream
    
            self.start_pinging()
            self._run_callback(self.handler.open, *self.handler.open_args,
                               **self.handler.open_kwargs)
            self._receive_frame()
    
        def _parse_extensions_header(self, headers):
            extensions = headers.get("Sec-WebSocket-Extensions", '')
            if extensions:
                return [httputil._parse_header(e.strip())
                        for e in extensions.split(',')]
            return []
    
        def _process_server_headers(self, key, headers):
            """Process the headers sent by the server to this client connection.
    
            'key' is the websocket handshake challenge/response key.
            """
            assert headers['Upgrade'].lower() == 'websocket'
            assert headers['Connection'].lower() == 'upgrade'
            accept = self.compute_accept_value(key)
            assert headers['Sec-Websocket-Accept'] == accept
    
            extensions = self._parse_extensions_header(headers)
            for ext in extensions:
                if (ext[0] == 'permessage-deflate' and
                        self._compression_options is not None):
                    self._create_compressors('client', ext[1])
                else:
                    raise ValueError("unsupported extension %r", ext)
    
        def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
            """Converts a websocket agreed_parameters set to keyword arguments
            for our compressor objects.
            """
            options = dict(
                persistent=(side + '_no_context_takeover') not in agreed_parameters)
            wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
            if wbits_header is None:
                options['max_wbits'] = zlib.MAX_WBITS
            else:
                options['max_wbits'] = int(wbits_header)
            options['compression_options'] = compression_options
            return options
    
        def _create_compressors(self, side, agreed_parameters, compression_options=None):
            # TODO: handle invalid parameters gracefully
            allowed_keys = set(['server_no_context_takeover',
                                'client_no_context_takeover',
                                'server_max_window_bits',
                                'client_max_window_bits'])
            for key in agreed_parameters:
                if key not in allowed_keys:
                    raise ValueError("unsupported compression parameter %r" % key)
            other_side = 'client' if (side == 'server') else 'server'
            self._compressor = _PerMessageDeflateCompressor(
                **self._get_compressor_options(side, agreed_parameters, compression_options))
            self._decompressor = _PerMessageDeflateDecompressor(
                **self._get_compressor_options(other_side, agreed_parameters, compression_options))
    
        def _write_frame(self, fin, opcode, data, flags=0):
            if fin:
                finbit = self.FIN
            else:
                finbit = 0
            frame = struct.pack("B", finbit | opcode | flags)
            l = len(data)
            if self.mask_outgoing:
                mask_bit = 0x80
            else:
                mask_bit = 0
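            # RFC 6455 section 5.2: payloads shorter than 126 bytes encode
            # the length in the second byte itself; 126 signals a 16-bit
            # extended length and 127 a 64-bit extended length.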
            if l < 126:
                frame += struct.pack("B", l | mask_bit)
            elif l <= 0xFFFF:
                frame += struct.pack("!BH", 126 | mask_bit, l)
            else:
                frame += struct.pack("!BQ", 127 | mask_bit, l)
            if self.mask_outgoing:
                mask = os.urandom(4)
                data = mask + _websocket_mask(mask, data)
            frame += data
            self._wire_bytes_out += len(frame)
            try:
                return self.stream.write(frame)
            except StreamClosedError:
                self._abort()
    
        def write_message(self, message, binary=False):
            """Sends the given message to the client of this Web Socket."""
            if binary:
                opcode = 0x2
            else:
                opcode = 0x1
            message = tornado.escape.utf8(message)
            assert isinstance(message, bytes)
            self._message_bytes_out += len(message)
            flags = 0
            if self._compressor:
                message = self._compressor.compress(message)
                flags |= self.RSV1
            return self._write_frame(True, opcode, message, flags=flags)
    
        def write_ping(self, data):
            """Send ping frame."""
            assert isinstance(data, bytes)
            self._write_frame(True, 0x9, data)
    
        def _receive_frame(self):
            try:
                self.stream.read_bytes(2, self._on_frame_start)
            except StreamClosedError:
                self._abort()
    
        def _on_frame_start(self, data):
            self._wire_bytes_in += len(data)
            header, payloadlen = struct.unpack("BB", data)
            self._final_frame = header & self.FIN
            reserved_bits = header & self.RSV_MASK
            self._frame_opcode = header & self.OPCODE_MASK
            self._frame_opcode_is_control = self._frame_opcode & 0x8
            if self._decompressor is not None and self._frame_opcode != 0:
                self._frame_compressed = bool(reserved_bits & self.RSV1)
                reserved_bits &= ~self.RSV1
            if reserved_bits:
                # client is using as-yet-undefined extensions; abort
                self._abort()
                return
            self._masked_frame = bool(payloadlen & 0x80)
            payloadlen = payloadlen & 0x7f
            if self._frame_opcode_is_control and payloadlen >= 126:
                # control frames must have payload < 126
                self._abort()
                return
            try:
                if payloadlen < 126:
                    self._frame_length = payloadlen
                    if self._masked_frame:
                        self.stream.read_bytes(4, self._on_masking_key)
                    else:
                        self._read_frame_data(False)
                elif payloadlen == 126:
                    self.stream.read_bytes(2, self._on_frame_length_16)
                elif payloadlen == 127:
                    self.stream.read_bytes(8, self._on_frame_length_64)
            except StreamClosedError:
                self._abort()
    
        def _read_frame_data(self, masked):
            new_len = self._frame_length
            if self._fragmented_message_buffer is not None:
                new_len += len(self._fragmented_message_buffer)
            if new_len > (self.handler.max_message_size or 10 * 1024 * 1024):
                self.close(1009, "message too big")
                return
            self.stream.read_bytes(
                self._frame_length,
                self._on_masked_frame_data if masked else self._on_frame_data)
    
        def _on_frame_length_16(self, data):
            self._wire_bytes_in += len(data)
            self._frame_length = struct.unpack("!H", data)[0]
            try:
                if self._masked_frame:
                    self.stream.read_bytes(4, self._on_masking_key)
                else:
                    self._read_frame_data(False)
            except StreamClosedError:
                self._abort()
    
        def _on_frame_length_64(self, data):
            self._wire_bytes_in += len(data)
            self._frame_length = struct.unpack("!Q", data)[0]
            try:
                if self._masked_frame:
                    self.stream.read_bytes(4, self._on_masking_key)
                else:
                    self._read_frame_data(False)
            except StreamClosedError:
                self._abort()
    
        def _on_masking_key(self, data):
            self._wire_bytes_in += len(data)
            self._frame_mask = data
            try:
                self._read_frame_data(True)
            except StreamClosedError:
                self._abort()
    
        def _on_masked_frame_data(self, data):
            # Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
            self._on_frame_data(_websocket_mask(self._frame_mask, data))
    
        def _on_frame_data(self, data):
            handled_future = None
    
            self._wire_bytes_in += len(data)
            if self._frame_opcode_is_control:
                # control frames may be interleaved with a series of fragmented
                # data frames, so control frames must not interact with
                # self._fragmented_*
                if not self._final_frame:
                    # control frames must not be fragmented
                    self._abort()
                    return
                opcode = self._frame_opcode
            elif self._frame_opcode == 0:  # continuation frame
                if self._fragmented_message_buffer is None:
                    # nothing to continue
                    self._abort()
                    return
                self._fragmented_message_buffer += data
                if self._final_frame:
                    opcode = self._fragmented_message_opcode
                    data = self._fragmented_message_buffer
                    self._fragmented_message_buffer = None
            else:  # start of new data message
                if self._fragmented_message_buffer is not None:
                    # can't start new message until the old one is finished
                    self._abort()
                    return
                if self._final_frame:
                    opcode = self._frame_opcode
                else:
                    self._fragmented_message_opcode = self._frame_opcode
                    self._fragmented_message_buffer = data
    
            if self._final_frame:
                handled_future = self._handle_message(opcode, data)
    
            if not self.client_terminated:
                if handled_future:
                    # on_message is a coroutine, process more frames once it's done.
                    handled_future.add_done_callback(
                        lambda future: self._receive_frame())
                else:
                    self._receive_frame()
    
        def _handle_message(self, opcode, data):
            """Execute on_message, returning its Future if it is a coroutine."""
            if self.client_terminated:
                return
    
            if self._frame_compressed:
                data = self._decompressor.decompress(data)
    
            if opcode == 0x1:
                # UTF-8 data
                self._message_bytes_in += len(data)
                try:
                    decoded = data.decode("utf-8")
                except UnicodeDecodeError:
                    self._abort()
                    return
                return self._run_callback(self.handler.on_message, decoded)
            elif opcode == 0x2:
                # Binary data
                self._message_bytes_in += len(data)
                return self._run_callback(self.handler.on_message, data)
            elif opcode == 0x8:
                # Close
                self.client_terminated = True
                if len(data) >= 2:
                    self.handler.close_code = struct.unpack('>H', data[:2])[0]
                if len(data) > 2:
                    self.handler.close_reason = to_unicode(data[2:])
                # Echo the received close code, if any (RFC 6455 section 5.5.1).
                self.close(self.handler.close_code)
            elif opcode == 0x9:
                # Ping
                self._write_frame(True, 0xA, data)
                self._run_callback(self.handler.on_ping, data)
            elif opcode == 0xA:
                # Pong
                self.last_pong = IOLoop.current().time()
                return self._run_callback(self.handler.on_pong, data)
            else:
                self._abort()
    
        def close(self, code=None, reason=None):
            """Closes the WebSocket connection."""
            if not self.server_terminated:
                if not self.stream.closed():
                    if code is None and reason is not None:
                        code = 1000  # "normal closure" status code
                    if code is None:
                        close_data = b''
                    else:
                        close_data = struct.pack('>H', code)
                    if reason is not None:
                        close_data += utf8(reason)
                    self._write_frame(True, 0x8, close_data)
                self.server_terminated = True
            if self.client_terminated:
                if self._waiting is not None:
                    self.stream.io_loop.remove_timeout(self._waiting)
                    self._waiting = None
                self.stream.close()
            elif self._waiting is None:
                # Give the client a few seconds to complete a clean shutdown,
                # otherwise just close the connection.
                self._waiting = self.stream.io_loop.add_timeout(
                    self.stream.io_loop.time() + 5, self._abort)
    
        @property
        def ping_interval(self):
            interval = self.handler.ping_interval
            if interval is not None:
                return interval
            return 0
    
        @property
        def ping_timeout(self):
            timeout = self.handler.ping_timeout
            if timeout is not None:
                return timeout
            return max(3 * self.ping_interval, 30)
    
        def start_pinging(self):
            """Start sending periodic pings to keep the connection alive"""
            if self.ping_interval > 0:
                self.last_ping = self.last_pong = IOLoop.current().time()
                self.ping_callback = PeriodicCallback(
                    self.periodic_ping, self.ping_interval * 1000)
                self.ping_callback.start()
    
        def periodic_ping(self):
            """Send a ping to keep the websocket alive
    
            Called periodically if the websocket_ping_interval is set and non-zero.
            """
            if self.stream.closed() and self.ping_callback is not None:
                self.ping_callback.stop()
                return
    
            # Check for timeout on pong. Make sure that we really have
            # sent a recent ping in case the machine with both server and
            # client has been suspended since the last ping.
            now = IOLoop.current().time()
            since_last_pong = now - self.last_pong
            since_last_ping = now - self.last_ping
            if (since_last_ping < 2 * self.ping_interval and
                    since_last_pong > self.ping_timeout):
                self.close()
                return
    
            self.write_ping(b'')
            self.last_ping = now
    
    
    class WebSocketClientConnection(simple_httpclient._HTTPConnection):
        """WebSocket client connection.
    
        This class should not be instantiated directly; use the
        `websocket_connect` function instead.
        """
        def __init__(self, io_loop, request, on_message_callback=None,
                     compression_options=None, ping_interval=None, ping_timeout=None,
                     max_message_size=None):
            self.compression_options = compression_options
            self.connect_future = TracebackFuture()
            self.protocol = None
            self.read_future = None
            self.read_queue = collections.deque()
            self.key = base64.b64encode(os.urandom(16))
            self._on_message_callback = on_message_callback
            self.close_code = self.close_reason = None
            self.ping_interval = ping_interval
            self.ping_timeout = ping_timeout
            self.max_message_size = max_message_size
    
            scheme, sep, rest = request.url.partition(':')
            scheme = {'ws': 'http', 'wss': 'https'}[scheme]
            request.url = scheme + sep + rest
            request.headers.update({
                'Upgrade': 'websocket',
                'Connection': 'Upgrade',
                'Sec-WebSocket-Key': self.key,
                'Sec-WebSocket-Version': '13',
            })
            if self.compression_options is not None:
                # Always offer to let the server set our max_wbits (and even though
                # we don't offer it, we will accept a client_no_context_takeover
                # from the server).
                # TODO: set server parameters for deflate extension
                # if requested in self.compression_options.
                request.headers['Sec-WebSocket-Extensions'] = (
                    'permessage-deflate; client_max_window_bits')
    
            self.tcp_client = TCPClient(io_loop=io_loop)
            super(WebSocketClientConnection, self).__init__(
                io_loop, None, request, lambda: None, self._on_http_response,
                104857600, self.tcp_client, 65536, 104857600)
    
        def close(self, code=None, reason=None):
            """Closes the websocket connection.
    
            ``code`` and ``reason`` are documented under
            `WebSocketHandler.close`.
    
            .. versionadded:: 3.2
    
            .. versionchanged:: 4.0
    
               Added the ``code`` and ``reason`` arguments.
            """
            if self.protocol is not None:
                self.protocol.close(code, reason)
                self.protocol = None
    
        def on_connection_close(self):
            if not self.connect_future.done():
                self.connect_future.set_exception(StreamClosedError())
            self.on_message(None)
            self.tcp_client.close()
            super(WebSocketClientConnection, self).on_connection_close()
    
        def _on_http_response(self, response):
            if not self.connect_future.done():
                if response.error:
                    self.connect_future.set_exception(response.error)
                else:
                    self.connect_future.set_exception(WebSocketError(
                        "Non-websocket response"))
    
        def headers_received(self, start_line, headers):
            if start_line.code != 101:
                return super(WebSocketClientConnection, self).headers_received(
                    start_line, headers)
    
            self.headers = headers
            self.protocol = self.get_websocket_protocol()
            self.protocol._process_server_headers(self.key, self.headers)
            self.protocol.start_pinging()
            self.protocol._receive_frame()
    
            if self._timeout is not None:
                self.io_loop.remove_timeout(self._timeout)
                self._timeout = None
    
            self.stream = self.connection.detach()
            self.stream.set_close_callback(self.on_connection_close)
            # Once we've taken over the connection, clear the final callback
            # we set on the http request.  This deactivates the error handling
            # in simple_httpclient that would otherwise interfere with our
            # ability to see exceptions.
            self.final_callback = None
    
            self.connect_future.set_result(self)
    
        def write_message(self, message, binary=False):
            """Sends a message to the WebSocket server."""
            return self.protocol.write_message(message, binary)
    
        def read_message(self, callback=None):
            """Reads a message from the WebSocket server.
    
            If on_message_callback was specified at WebSocket
            initialization, this function will never return messages.
    
            Returns a future whose result is the message, or None
            if the connection is closed.  If a callback argument
            is given it will be called with the future when it is
            ready.
            """
            assert self.read_future is None
            future = TracebackFuture()
            if self.read_queue:
                future.set_result(self.read_queue.popleft())
            else:
                self.read_future = future
            if callback is not None:
                self.io_loop.add_future(future, callback)
            return future
    
        def on_message(self, message):
            if self._on_message_callback:
                self._on_message_callback(message)
            elif self.read_future is not None:
                self.read_future.set_result(message)
                self.read_future = None
            else:
                self.read_queue.append(message)
    
        def on_pong(self, data):
            pass
    
        def on_ping(self, data):
            pass
    
        def get_websocket_protocol(self):
            return WebSocketProtocol13(self, mask_outgoing=True,
                                       compression_options=self.compression_options)
    
    
    def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
                          on_message_callback=None, compression_options=None,
                          ping_interval=None, ping_timeout=None,
                          max_message_size=None):
        """Client-side websocket support.
    
        Takes a url and returns a Future whose result is a
        `WebSocketClientConnection`.
    
        ``compression_options`` is interpreted in the same way as the
        return value of `.WebSocketHandler.get_compression_options`.
    
        The connection supports two styles of operation. In the coroutine
        style, the application typically calls
        `~.WebSocketClientConnection.read_message` in a loop::
    
            conn = yield websocket_connect(url)
            while True:
                msg = yield conn.read_message()
                if msg is None: break
                # Do something with msg
    
        In the callback style, pass an ``on_message_callback`` to
        ``websocket_connect``. In both styles, a message of ``None``
        indicates that the connection has been closed.
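
        A hedged sketch of the callback style::

            def on_msg(msg):
                if msg is None:
                    print("connection closed")
                else:
                    print("received", msg)

            conn = yield websocket_connect(url, on_message_callback=on_msg)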
    
        .. versionchanged:: 3.2
           Also accepts ``HTTPRequest`` objects in place of urls.
    
        .. versionchanged:: 4.1
           Added ``compression_options`` and ``on_message_callback``.
           The ``io_loop`` argument is deprecated.
    
        .. versionchanged:: 4.5
           Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
           arguments, which have the same meaning as in `WebSocketHandler`.
        """
        if io_loop is None:
            io_loop = IOLoop.current()
        if isinstance(url, httpclient.HTTPRequest):
            assert connect_timeout is None
            request = url
            # Copy and convert the headers dict/object (see comments in
            # AsyncHTTPClient.fetch)
            request.headers = httputil.HTTPHeaders(request.headers)
        else:
            request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
        request = httpclient._RequestProxy(
            request, httpclient.HTTPRequest._DEFAULTS)
        conn = WebSocketClientConnection(io_loop, request,
                                         on_message_callback=on_message_callback,
                                         compression_options=compression_options,
                                         ping_interval=ping_interval,
                                         ping_timeout=ping_timeout,
                                         max_message_size=max_message_size)
        if callback is not None:
            io_loop.add_future(conn.connect_future, callback)
        return conn.connect_future
    tornado-4.5.3/tornado/wsgi.py

    #!/usr/bin/env python
    #
    # Copyright 2009 Facebook
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may
    # not use this file except in compliance with the License. You may obtain
    # a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    # License for the specific language governing permissions and limitations
    # under the License.
    
    """WSGI support for the Tornado web framework.
    
    WSGI is the Python standard for web servers, and allows for interoperability
    between Tornado and other Python web frameworks and servers.  This module
    provides WSGI support in two ways:
    
    * `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
      interface.  This is useful for running a Tornado app on another
      HTTP server, such as Google App Engine.  See the `WSGIAdapter` class
      documentation for limitations that apply.
    * `WSGIContainer` lets you run other WSGI applications and frameworks on the
      Tornado HTTP server.  For example, with this class you can mix Django
      and Tornado handlers in a single server.
    """
    
    from __future__ import absolute_import, division, print_function
    
    import sys
    from io import BytesIO
    import tornado
    
    from tornado.concurrent import Future
    from tornado import escape
    from tornado import httputil
    from tornado.log import access_log
    from tornado import web
    from tornado.escape import native_str
    from tornado.util import unicode_type, PY3
    
    
    if PY3:
        import urllib.parse as urllib_parse  # py3
    else:
        import urllib as urllib_parse
    
# PEP 3333 specifies that WSGI on Python 3 generally deals with byte strings
# that are smuggled inside objects of type str (via the latin1 encoding).
    # These functions are like those in the tornado.escape module, but defined
    # here to minimize the temptation to use them in non-wsgi contexts.
    if str is unicode_type:
        def to_wsgi_str(s):
            assert isinstance(s, bytes)
            return s.decode('latin1')
    
        def from_wsgi_str(s):
            assert isinstance(s, str)
            return s.encode('latin1')
    else:
        def to_wsgi_str(s):
            assert isinstance(s, bytes)
            return s
    
        def from_wsgi_str(s):
            assert isinstance(s, str)
            return s
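
# A round-trip sketch (illustrative value, exercising the Python 3 branch
# above):
#
#     raw = b"caf\xc3\xa9"          # UTF-8 bytes for "café"
#     native = to_wsgi_str(raw)     # latin1-decoded WSGI "native string"
#     assert from_wsgi_str(native) == raw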
    
    
    class WSGIApplication(web.Application):
        """A WSGI equivalent of `tornado.web.Application`.
    
        .. deprecated:: 4.0
    
           Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
        """
        def __call__(self, environ, start_response):
            return WSGIAdapter(self)(environ, start_response)
    
    
    # WSGI has no facilities for flow control, so just return an already-done
    # Future when the interface requires it.
    _dummy_future = Future()
    _dummy_future.set_result(None)
    
    
    class _WSGIConnection(httputil.HTTPConnection):
        def __init__(self, method, start_response, context):
            self.method = method
            self.start_response = start_response
            self.context = context
            self._write_buffer = []
            self._finished = False
            self._expected_content_remaining = None
            self._error = None
    
        def set_close_callback(self, callback):
            # WSGI has no facility for detecting a closed connection mid-request,
            # so we can simply ignore the callback.
            pass
    
        def write_headers(self, start_line, headers, chunk=None, callback=None):
            if self.method == 'HEAD':
                self._expected_content_remaining = 0
            elif 'Content-Length' in headers:
                self._expected_content_remaining = int(headers['Content-Length'])
            else:
                self._expected_content_remaining = None
            self.start_response(
                '%s %s' % (start_line.code, start_line.reason),
                [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
            if chunk is not None:
                self.write(chunk, callback)
            elif callback is not None:
                callback()
            return _dummy_future
    
        def write(self, chunk, callback=None):
            if self._expected_content_remaining is not None:
                self._expected_content_remaining -= len(chunk)
                if self._expected_content_remaining < 0:
                    self._error = httputil.HTTPOutputError(
                        "Tried to write more data than Content-Length")
                    raise self._error
            self._write_buffer.append(chunk)
            if callback is not None:
                callback()
            return _dummy_future
    
        def finish(self):
            if (self._expected_content_remaining is not None and
                    self._expected_content_remaining != 0):
                self._error = httputil.HTTPOutputError(
                    "Tried to write %d bytes less than Content-Length" %
                    self._expected_content_remaining)
                raise self._error
            self._finished = True
    
    
    class _WSGIRequestContext(object):
        def __init__(self, remote_ip, protocol):
            self.remote_ip = remote_ip
            self.protocol = protocol
    
        def __str__(self):
            return self.remote_ip
    
    
    class WSGIAdapter(object):
        """Converts a `tornado.web.Application` instance into a WSGI application.
    
        Example usage::
    
            import tornado.web
            import tornado.wsgi
            import wsgiref.simple_server
    
            class MainHandler(tornado.web.RequestHandler):
                def get(self):
                    self.write("Hello, world")
    
            if __name__ == "__main__":
                application = tornado.web.Application([
                    (r"/", MainHandler),
                ])
                wsgi_app = tornado.wsgi.WSGIAdapter(application)
                server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
                server.serve_forever()
    
    See the `appengine demo
    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    for an example of using this module to run a Tornado app on Google
    App Engine.
    
        In WSGI mode asynchronous methods are not supported.  This means
        that it is not possible to use `.AsyncHTTPClient`, or the
        `tornado.auth` or `tornado.websocket` modules.
    
        .. versionadded:: 4.0
        """
        def __init__(self, application):
            if isinstance(application, WSGIApplication):
                self.application = lambda request: web.Application.__call__(
                    application, request)
            else:
                self.application = application
    
        def __call__(self, environ, start_response):
            method = environ["REQUEST_METHOD"]
            uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
            uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
            if environ.get("QUERY_STRING"):
                uri += "?" + environ["QUERY_STRING"]
            headers = httputil.HTTPHeaders()
            if environ.get("CONTENT_TYPE"):
                headers["Content-Type"] = environ["CONTENT_TYPE"]
            if environ.get("CONTENT_LENGTH"):
                headers["Content-Length"] = environ["CONTENT_LENGTH"]
            for key in environ:
                if key.startswith("HTTP_"):
                    headers[key[5:].replace("_", "-")] = environ[key]
            if headers.get("Content-Length"):
                body = environ["wsgi.input"].read(
                    int(headers["Content-Length"]))
            else:
                body = b""
            protocol = environ["wsgi.url_scheme"]
            remote_ip = environ.get("REMOTE_ADDR", "")
            if environ.get("HTTP_HOST"):
                host = environ["HTTP_HOST"]
            else:
                host = environ["SERVER_NAME"]
            connection = _WSGIConnection(method, start_response,
                                         _WSGIRequestContext(remote_ip, protocol))
            request = httputil.HTTPServerRequest(
                method, uri, "HTTP/1.1", headers=headers, body=body,
                host=host, connection=connection)
            request._parse_body()
            self.application(request)
            if connection._error:
                raise connection._error
            if not connection._finished:
                raise Exception("request did not finish synchronously")
            return connection._write_buffer
    
    
    class WSGIContainer(object):
        r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    
        .. warning::
    
           WSGI is a *synchronous* interface, while Tornado's concurrency model
           is based on single-threaded asynchronous execution.  This means that
           running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
           than running the same app in a multi-threaded WSGI server like
           ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
           benefits to combining Tornado and WSGI in the same process that
           outweigh the reduced scalability.
    
        Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
        run it. For example::
    
            def simple_app(environ, start_response):
                status = "200 OK"
                response_headers = [("Content-type", "text/plain")]
                start_response(status, response_headers)
                return ["Hello world!\n"]
    
            container = tornado.wsgi.WSGIContainer(simple_app)
            http_server = tornado.httpserver.HTTPServer(container)
            http_server.listen(8888)
            tornado.ioloop.IOLoop.current().start()
    
    This class is intended to let other frameworks (Django, web.py, etc.)
        run on the Tornado HTTP server and I/O loop.
    
        The `tornado.web.FallbackHandler` class is often useful for mixing
        Tornado and WSGI apps in the same server.  See
        https://github.com/bdarnell/django-tornado-demo for a complete example.
        """
        def __init__(self, wsgi_application):
            self.wsgi_application = wsgi_application
    
        def __call__(self, request):
            data = {}
            response = []
    
            def start_response(status, response_headers, exc_info=None):
                data["status"] = status
                data["headers"] = response_headers
                return response.append
            app_response = self.wsgi_application(
                WSGIContainer.environ(request), start_response)
            try:
                response.extend(app_response)
                body = b"".join(response)
            finally:
                if hasattr(app_response, "close"):
                    app_response.close()
            if not data:
                raise Exception("WSGI app did not call start_response")
    
            status_code, reason = data["status"].split(' ', 1)
            status_code = int(status_code)
            headers = data["headers"]
            header_set = set(k.lower() for (k, v) in headers)
            body = escape.utf8(body)
            if status_code != 304:
                if "content-length" not in header_set:
                    headers.append(("Content-Length", str(len(body))))
                if "content-type" not in header_set:
                    headers.append(("Content-Type", "text/html; charset=UTF-8"))
            if "server" not in header_set:
                headers.append(("Server", "TornadoServer/%s" % tornado.version))
    
            start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
            header_obj = httputil.HTTPHeaders()
            for key, value in headers:
                header_obj.add(key, value)
            request.connection.write_headers(start_line, header_obj, chunk=body)
            request.connection.finish()
            self._log(status_code, request)
    
        @staticmethod
        def environ(request):
            """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
            """
            hostport = request.host.split(":")
            if len(hostport) == 2:
                host = hostport[0]
                port = int(hostport[1])
            else:
                host = request.host
                port = 443 if request.protocol == "https" else 80
            environ = {
                "REQUEST_METHOD": request.method,
                "SCRIPT_NAME": "",
                "PATH_INFO": to_wsgi_str(escape.url_unescape(
                    request.path, encoding=None, plus=False)),
                "QUERY_STRING": request.query,
                "REMOTE_ADDR": request.remote_ip,
                "SERVER_NAME": host,
                "SERVER_PORT": str(port),
                "SERVER_PROTOCOL": request.version,
                "wsgi.version": (1, 0),
                "wsgi.url_scheme": request.protocol,
                "wsgi.input": BytesIO(escape.utf8(request.body)),
                "wsgi.errors": sys.stderr,
                "wsgi.multithread": False,
                "wsgi.multiprocess": True,
                "wsgi.run_once": False,
            }
            if "Content-Type" in request.headers:
                environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
            if "Content-Length" in request.headers:
                environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
            for key, value in request.headers.items():
                environ["HTTP_" + key.replace("-", "_").upper()] = value
            return environ
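
    # Example (illustrative request, not from the test suite): a request
    # "GET /logo.png?v=2 HTTP/1.1" with "Host: example.com" over plain HTTP
    # maps to an environ containing, among other keys:
    #
    #     {"REQUEST_METHOD": "GET", "PATH_INFO": "/logo.png",
    #      "QUERY_STRING": "v=2", "SERVER_NAME": "example.com",
    #      "SERVER_PORT": "80", "wsgi.url_scheme": "http"}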
    
        def _log(self, status_code, request):
            if status_code < 400:
                log_method = access_log.info
            elif status_code < 500:
                log_method = access_log.warning
            else:
                log_method = access_log.error
            request_time = 1000.0 * request.request_time()
            summary = request.method + " " + request.uri + " (" + \
                request.remote_ip + ")"
            log_method("%d %s %.2fms", status_code, summary, request_time)
    
    
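# Backwards-compatibility alias: tornado.wsgi.HTTPRequest now refers to the
# request class in tornado.httputil.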
    HTTPRequest = httputil.HTTPServerRequest
tornado-4.5.3/tox.ini
# Tox (https://tox.readthedocs.io) is a tool for running tests
    # in multiple virtualenvs.  This configuration file will run the tornado
    # test suite on all supported python versions.  To use it, "pip install tox"
    # and then run "tox" from this directory.
    #
    # This configuration requires tox 1.8 or higher.
    #
    # Installation tips:
    # When building pycurl on my macports-based setup, I need to either set the
    # environment variable ARCHFLAGS='-arch x86_64' or use
    # 'port install curl +universal' to get both 32- and 64-bit versions of
    # libcurl.
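#
# Usage sketch (env names come from the envlist below; "py36-full" is one
# such name, assuming python3.6 is installed):
#   tox -e py36-full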
    [tox]
    envlist =
            # Basic configurations: Run the tests in both minimal installations
            # and with all optional dependencies.
            # (pypy3 doesn't have any optional deps yet)
            {py27,pypy,py33,py34,py35,py36,pypy3},
            {py27,pypy,py33,py34,py35,py36}-full,
    
            # Also run the tests with each possible replacement of a default
            # component.  Run each test on both python 2 and 3 where possible.
            # (Only one 2.x and one 3.x unless there are known differences).
            # py2 and py3 are aliases for py27-full and py35-full.
    
            # Alternate HTTP clients.
            {py2,py3}-curl,
    
            # Alternate IOLoops.
            {py2,py3}-select,
            {py2,py3}-full-twisted,
            py2-twistedlayered,
            {py3,py33}-full-asyncio,
            py2-full-trollius,
    
            # Alternate Resolvers.
            {py2,py3}-full-{threadedresolver},
            {py2,py3}-full-caresresolver,
    
            # Other configurations; see comments below.
            {py2,py3}-{monotonic,opt},
            py3-{lang_c,lang_utf8},
            py2-locale,
            {py27,py3}-unittest2,
    
            # Ensure the sphinx build has no errors or warnings
            py3-sphinx-docs,
            # Run the doctests via sphinx (which covers things not run
            # in the regular test suite and vice versa)
            {py2,py3}-sphinx-doctest
    
    [testenv]
# Most of these are defaults, but if you specify any of them you can't
# fall back to the defaults for the others.
    basepython =
               py27: python2.7
               py33: python3.3
               py34: python3.4
               py35: python3.5
               py36: python3.6
               pypy: pypy
               pypy3: pypy3
               py2: python2.7
               py3: python3.6
    
    deps =
         # unittest2 doesn't add anything we need on 2.7+, but we should ensure that
         # its existence doesn't break anything due to conditional imports.
         py27-unittest2: unittest2
         py3-unittest2: unittest2py3k
     # cpython-only deps: under pypy, pycurl installs but curl_httpclient
     # doesn't work; twisted mostly works under pypy but is a bit flaky.
         {py27,py33,py34,py35,py36}-full: pycurl
         {py2,py3}: pycurl>=7.19.3.1
         # twisted is cpython only.
         {py27,py33,py34,py35,py36}-full: twisted
         {py2,py3}: twisted
         # pycares installation currently fails on py33
         # (https://github.com/pypa/pip/pull/816)
         {py2,py3,py27,py33,py34,py35,py36}-full: pycares
         # futures became standard in py32
         {py2,py27,pypy}-full: futures
         # mock became standard in py33
         {py2,py27,pypy,py3,pypy3}-full: mock
         # singledispatch became standard in py34.
         {py2,py27,pypy,py3,py33}-full: singledispatch
         py33-asyncio: asyncio
         trollius: trollius
         py2-monotonic: monotonic
         sphinx: sphinx
         sphinx: sphinx_rtd_theme
    
    setenv =
           # The extension is mandatory on cpython.
           {py2,py27,py3,py33,py34,py35,py36}: TORNADO_EXTENSION=1
           # In python 3, opening files in text mode uses a
           # system-dependent encoding by default.  Run the tests with "C"
           # (ascii) and "utf-8" locales to ensure we don't have hidden
           # dependencies on this setting.
           lang_c: LANG=C
           lang_utf8: LANG=en_US.utf-8
           # tox's parser chokes if all the setenv entries are conditional.
           DUMMY=dummy
           {py2,py27,py3,py33,py34,py35,py36}-no-ext: TORNADO_EXTENSION=0
    
    # All non-comment lines but the last must end in a backslash.
    # Tox filters line-by-line based on the environment name.
    commands =
             python \
             # py3*: -b turns on an extra warning when calling
             # str(bytes), and -bb makes it an error.
             {py3,py33,py34,py35,py36,pypy3}: -bb \
             # Python's optimized mode disables the assert statement, so
             # run the tests in this mode to ensure we haven't fallen into
             # the trap of relying on an assertion's side effects or using
             # them for things that should be runtime errors.
             opt: -O \
             -m tornado.test.runtests \
             # Note that httpclient_test is always run with both client
             # implementations; this flag controls which client all the
             # other tests use.
             curl: --httpclient=tornado.curl_httpclient.CurlAsyncHTTPClient \
             select: --ioloop=tornado.platform.select.SelectIOLoop \
             twisted: --ioloop=tornado.platform.twisted.TwistedIOLoop \
             twistedlayered: --ioloop=tornado.test.twisted_test.LayeredTwistedIOLoop --resolver=tornado.platform.twisted.TwistedResolver \
             {asyncio,trollius}: --ioloop=tornado.platform.asyncio.AsyncIOLoop \
             caresresolver: --resolver=tornado.platform.caresresolver.CaresResolver \
             threadedresolver: --resolver=tornado.netutil.ThreadedResolver \
             monotonic: --ioloop_time_monotonic \
         # Test with a non-English locale to uncover str/bytes mixing issues.
             locale: --locale=zh_TW \
             {posargs:}
    
    # python will import relative to the current working directory by default,
    # so cd into the tox working directory to avoid picking up the working
    # copy of the files (especially important for the speedups module).
    changedir = {toxworkdir}
    
    # tox 1.6 passes --pre to pip by default, which currently has problems
    # installing pycurl and monotime (https://github.com/pypa/pip/issues/1405).
    # Remove it (it's not a part of {opts}) to only install real releases.
    install_command = pip install {opts} {packages}
    
    [testenv:py3-sphinx-docs]
    changedir = docs
    # For some reason the extension fails to load in this configuration,
    # but it's not really needed for docs anyway.
    setenv = TORNADO_EXTENSION=0
    commands =
        sphinx-build -q -E -n -W -b html . {envtmpdir}/html
    
    [testenv:py2-sphinx-doctest]
    changedir = docs
    setenv = TORNADO_EXTENSION=0
    # No -W for doctests because that disallows tests with empty output.
    commands =
         sphinx-build -q -E -n -b doctest . {envtmpdir}/doctest
    
    [testenv:py3-sphinx-doctest]
    changedir = docs
    setenv = TORNADO_EXTENSION=0
    commands =
         sphinx-build -q -E -n -b doctest . {envtmpdir}/doctest