eventlet-0.30.2/0000755000076500000240000000000014017673044014062 5ustar temotostaff00000000000000eventlet-0.30.2/AUTHORS0000644000076500000240000001331514006212666015132 0ustar temotostaff00000000000000Maintainer (i.e., Who To Hassle If You Find Bugs) ------------------------------------------------- Jakub Stasiak Nat Goodspeed The current maintainer(s) are volunteers with unrelated jobs. We can only pay sporadic attention to responding to your issue and pull request submissions. Your patience is greatly appreciated! Original Authors ---------------- * Bob Ippolito * Donovan Preston Contributors ------------ * AG Projects * Chris AtLee * R\. Tyler Ballance * Denis Bilenko * Mike Barton * Patrick Carlisle * Ben Ford * Andrew Godwin * Brantley Harris * Gregory Holt * Joe Malicki * Chet Murthy * Eugene Oden * radix * Scott Robinson * Tavis Rudd * Sergey Shepelev * Chuck Thier * Nick V * Daniele Varrazzo * Ryan Williams * Geoff Salmon * Edward George * Floris Bruynooghe * Paul Oppenheim * Jakub Stasiak * Aldona Majorek * Victor Sergeyev * David Szotten * Victor Stinner * Samuel Merritt * Eric Urban Linden Lab Contributors ----------------------- * John Beisley * Tess Chu * Nat Goodspeed * Dave Kaprielian * Kartic Krishnamurthy * Bryan O'Sullivan * Kent Quirk * Ryan Williams Thanks To --------- * AdamKG, giving the hint that invalid argument errors were introduced post-0.9.0 * Luke Tucker, bug report regarding wsgi + webob * Taso Du Val, reproing an exception squelching bug, saving children's lives ;-) * Luci Stanescu, for reporting twisted hub bug * Marcus Cavanaugh, for test case code that has been incredibly useful in tracking down bugs * Brian Brunswick, for many helpful questions and suggestions on the mailing list * Cesar Alaniz, for uncovering bugs of great import * the grugq, for contributing patches, suggestions, and use cases * Ralf Schmitt, for wsgi/webob incompatibility bug report and suggested fix * Benoit Chesneau, bug report on green.os and patch to fix it * Slant, better iterator implementation in tpool * Ambroff, nice pygtk hub example * Michael Carter, websocket patch to improve location handling * Marcin Bachry, nice repro of a bug and good diagnosis leading to the fix * David Ziegler, reporting issue #53 * Favo Yang, twisted hub patch * Schmir, patch that fixes readline method with chunked encoding in wsgi.py, advice on patcher * Slide, for open-sourcing gogreen * Holger Krekel, websocket example small fix * mikepk, debugging MySQLdb/tpool issues * Malcolm Cleaton, patch for Event exception handling * Alexey Borzenkov, for finding and fixing issues with Windows error detection (#66, #69), reducing dependencies in zeromq hub (#71) * Anonymous, finding and fixing error in websocket chat example (#70) * Edward George, finding and fixing an issue in the [e]poll hubs (#74), and in convenience (#86) * Ruijun Luo, figuring out incorrect openssl import for wrap_ssl (#73) * rfk, patch to get green zmq to respect noblock flag. * Soren Hansen, finding and fixing issue in subprocess (#77) * Stefano Rivera, making tests pass in absence of postgres (#78) * Joshua Kwan, fixing busy-wait in eventlet.green.ssl. 
* Nick Vatamaniuc, Windows SO_REUSEADDR patch (#83) * Clay Gerrard, wsgi handle socket closed by client (#95) * Eric Windisch, zmq getsockopt(EVENTS) wake correct threads (pull request 22) * Raymond Lu, fixing busy-wait in eventlet.green.ssl.socket.sendall() * Thomas Grainger, webcrawler example small fix, "requests" library import bug report, Travis integration * Peter Portante, save syscalls in socket.dup(), environ[REMOTE_PORT] in wsgi * Peter Skirko, fixing socket.settimeout(0) bug * Derk Tegeler, Pre-cache proxied GreenSocket methods (Bitbucket #136) * David Malcolm, optional "timeout" argument to the subprocess module (Bitbucket #89) * David Goetz, wsgi: Allow minimum_chunk_size to be overriden on a per request basis * Dmitry Orlov, websocket: accept Upgrade: websocket (lowercase) * Zhang Hua, profile: accumulate results between runs (Bitbucket #162) * Astrum Kuo, python3 compatibility fixes; greenthread.unlink() method * Davanum Srinivas, Python3 compatibility fixes * Dmitriy Kruglyak, PyPy 2.3 compatibility fix * Jan Grant, Michael Kerrin, second simultaneous read (GH-94) * Simon Jagoe, Python3 octal literal fix * Tushar Gohad, wsgi: Support optional headers w/ "100 Continue" responses * raylu, fixing operator precedence bug in eventlet.wsgi * Christoph Gysin, PEP 8 conformance * Andrey Gubarev * Corey Wright * Deva * Johannes Erdfelt * Kevin * QthCN * Steven Hardy * Stuart McLaren * Tomaz Muraus * ChangBo Guo(gcb), fixing typos in the documentation (GH-194) * Marc Abramowitz, fixing the README so it renders correctly on PyPI (GH-183) * Shaun Stanworth, equal chance to acquire semaphore from different greenthreads (GH-136) * Lior Neudorfer, Make sure SSL retries are done using the exact same data buffer * Sean Dague, wsgi: Provide python logging compatibility * Tim Simmons, Use _socket_nodns and select in dnspython support * Antonio Cuni, fix fd double close on PyPy * Seyeong Kim * Ihar Hrachyshka * Janusz Harkot * Fukuchi Daisuke * Ramakrishnan G * ashutosh-mishra * Azhar Hussain * Josh VanderLinden * Levente Polyak * Phus Lu * Collin Stocks, fixing eventlet.green.urllib2.urlopen() so it accepts cafile, capath, or cadefault arguments * Alexis Lee * Steven Erenst * Piët Delport * Alex Villacís Lasso * Yashwardhan Singh * Tim Burke * Ondřej Nový * Jarrod Johnson * Whitney Young * Matthew D. Pagel * Matt Yule-Bennett * Artur Stawiarski * Tal Wrii * Roman Podoliaka * Gevorg Davoian * Ondřej Kobližek * Yuichi Bando * Feng * Aayush Kasurde * Linbing * Geoffrey Thomas * Costas Christofi, adding permessage-deflate weboscket extension support * Peter Kovary, adding permessage-deflate weboscket extension support * Konstantin Enchant * James Page * Stefan Nica * Haikel Guemar * Miguel Grinberg * Chris Kerr * Anthony Sottile * Quan Tian * orishoshan * Matt Bennett * Ralf Haferkamp * Jake Tesler * Aayush Kasurde eventlet-0.30.2/LICENSE0000644000076500000240000000234614006212666015071 0ustar temotostaff00000000000000Unless otherwise noted, the files in Eventlet are under the following MIT license: Copyright (c) 2005-2006, Bob Ippolito Copyright (c) 2007-2010, Linden Research, Inc. 
Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. eventlet-0.30.2/MANIFEST.in0000644000076500000240000000027214006212666015616 0ustar temotostaff00000000000000recursive-include tests *.py *.crt *.key recursive-include doc *.rst *.txt *.py Makefile *.png recursive-include examples *.py *.html include MANIFEST.in NEWS AUTHORS LICENSE README.rst eventlet-0.30.2/NEWS0000644000076500000240000011601414017672757014576 0ustar temotostaff000000000000000.30.2 ====== * greendns: patch ssl to fix RecursionError on SSLContext.options.__set__ https://github.com/eventlet/eventlet/issues/677 0.30.1 ====== * patcher: built-in open() did not accept kwargs https://github.com/eventlet/eventlet/issues/683 0.30.0 ====== * pyopenssl tsafe module was deprecated and removed in v20.0.0 * deprecate pyevent hub * Deprecate CPython 2.7 and 3.4 support * py39: Add _at_fork_reinit method to Semaphores 0.29.1 ====== patcher: [py27] recursion error in pytest/python2.7 installing register_at_fork https://github.com/eventlet/eventlet/issues/660 patcher: monkey_patch(builtins=True) failed on py3 because `file` class is gone https://github.com/eventlet/eventlet/issues/541 don't crash on PyPy 7.0.0 https://github.com/eventlet/eventlet/pull/547 Only install monotonic on python2 https://github.com/eventlet/eventlet/pull/583 0.29.0 ====== * ssl: context wrapped listener fails accept() https://github.com/eventlet/eventlet/issues/651 0.28.1 ====== * Sorry, Eventlet was broken on Windows for versions 0.27-0.28 patcher: no os.register_at_fork on Windows (#654) * Clean up TypeError in __del__ 0.28.0 ====== * Always remove the right listener from the hub https://github.com/eventlet/eventlet/pull/645 0.27.0 ====== * patcher: Clean up threading book-keeping at fork when monkey-patched * backdoor: handle disconnects better 0.26.1 ====== * pin dnspython <2.0.0 https://github.com/eventlet/eventlet/issues/619 0.26.0 ====== * Fix compatibility with SSLContext usage >= Python 3.7 * wsgi: Fix header capitalization on py3 * Fix #508: Py37 Deadlock ThreadPoolExecutor (#598) * drop Python 3.4 support * Fix misc SyntaxWarning's under Python 3.8 * Remove unnecessary assignment in _recv_loop (#601) 0.25.2 ====== * green.ssl: redundant set_nonblocking() caused SSLWantReadError 0.25.1 ====== * wsgi (tests): Stop using deprecated cgi.parse_qs() to support Python 3.8; Thanks to Miro Hrončok * os: Add workaround to `open` for pathlib on py 3.7; Thanks to David Szotten 0.25.0 ====== * wsgi: Only send 100 Continue response if no response has 
been sent yet; Thanks to Tim Burke * wsgi: Return 400 on negative Content-Length request headers; Thanks to Tim Burke * Make a GreenPile with no spawn()s an empty sequence; Thanks to nat-goodspeed * wsgi: fix Input.readlines when dealing with chunked input; Thanks to Tim Burke * wsgi: fix Input.readline on Python 3; Thanks to Tim Burke * wsgi: Stop replacing invalid UTF-8 on py3; Thanks to Tim Burke * ssl: Fix compatibility with Python 3.7 ssl.SSLSocket; Thanks to Junyi * reimport submodule as well in patcher.inject; Thanks to Junyi * use Python 2 compatible syntax for keyword-only args; Thanks to nat-goodspeed * wsgi: Catch and swallow IOErrors during discard(); Thanks to Tim Burke * Fix for Python 3.7; Thanks to Marcel Plch * Fix race that could cause using epolls as default hub even when platform does not support it; Thanks to Sergey Shepelev * wsgi: make Expect 100-continue field-value case-insensitive; Thanks to Julien Kasarherou * greenthread: optimize _exit_funcs getattr/del dance; Thanks to Alex Kashirin * New benchmarks runner; Thanks to Sergey Shepelev * ssl: fix connect to use monotonic clock for timeout; Thanks to Sergey Shepelev 0.24.1 ====== * greendns: don't contact nameservers if one entry is returned from hosts file; Thanks to Daniel Alvarez 0.24.0 ====== * greendns: Fix infinite loop when UDP source address mismatch; Thanks to Lon Hohberger * greendns: Fix bad ipv6 comparison; Thanks to Lon Hohberger * wsgi: Use byte strings on py2 and unicode strings on py3; Thanks to Tim Burke * pools: put to empty pool would block sometimes; Thanks to Sam Merritt * greendns: resolving over TCP produced ValueError; Thanks to Jaume Marhuenda * support.greendns: ImportError when dns.rdtypes was imported before eventlet; Thanks to Jaume Marhuenda * greendns: full comment lines were not skipped; Thanks to nat-goodspeed * Drop support for Python3.3; Python2.6 and python-epoll package * external dependencies for six, monotonic, dnspython; Thanks to nat-goodspeed * wsgi: Don't strip all Unicode whitespace from headers on py3; Thanks to Tim Burke 0.23.0 ====== * green.threading: current_thread() did not see new monkey-patched threads; Thanks to Jake Tesler * tpool: exception in tpool-ed call leaked memory via backtrace * wsgi: latin-1 encoding dance for environ[PATH_INFO] 0.22.1 ====== * Fixed issue installing excess enum34 on Python3.4+ (rebuild with updated setuptools) * event: Event.wait() timeout=None argument to be compatible with upstream CPython * greendns: Treat /etc/hosts entries case-insensitive; Thanks to Ralf Haferkamp 0.22.0 ====== * convenience: (SO_REUSEPORT) socket.error is not OSError on Python 2; Thanks to JacoFourie@github * convenience: SO_REUSEPORT is not available on WSL platform (Linux on Windows) * convenience: skip SO_REUSEPORT for bind on random port (0) * dns: reading /etc/hosts raised DeprecationWarning for universal lines on Python 3.4+; Thanks to Chris Kerr * green.openssl: Drop OpenSSL.rand support; Thanks to Haikel Guemar * green.subprocess: keep CalledProcessError identity; Thanks to Linbing@github * greendns: be explicit about expecting bytes from sock.recv; Thanks to Matt Bennett * greendns: early socket.timeout was breaking IO retry loops * GreenSocket.accept does not notify_open; Thanks to orishoshan * patcher: set locked RLocks' owner only when patching existing locks; Thanks to Quan Tian * patcher: workaround for monotonic "no suitable implementation"; Thanks to Geoffrey Thomas * queue: empty except was catching too much * socket: context manager support; 
Thanks to Miguel Grinberg * support: update monotonic 1.3 (5c0322dc559bf) * support: upgrade bundled dnspython to 1.16.0 (22e9de1d7957e) https://github.com/eventlet/eventlet/issues/427 * websocket: fd leak when client did not close connection properly; Thanks to Konstantin Enchant * websocket: support permessage-deflate extension; Thanks to Costas Christofi and Peter Kovary * wsgi: close idle connections (also applies to websockets) * wsgi: deprecated options are one step closer to removal * wsgi: handle remote connection resets; Thanks to Stefan Nica 0.21.0 ====== * New timeout error API: .is_timeout=True on exception object It's now easy to test if network error is transient and retry is appropriate. Please spread the word and invite other libraries to support this interface. * hubs: use monotonic clock by default (bundled package); Thanks to Roman Podoliaka and Victor Stinner * dns: EVENTLET_NO_GREENDNS option is back, green is still default * dns: hosts file was consulted after nameservers * ssl: RecursionError on Python3.6+; Thanks to justdoit0823@github and Gevent developers * wsgi: log_output=False was not disabling startup and accepted messages * greenio: Fixed OSError: [WinError 10038] Socket operation on nonsocket * dns: EAI_NODATA was removed from RFC3493 and FreeBSD * green.select: fix mark_as_closed() wrong number of args * green.zmq: socket.{recv,send}_* signatures did not match recent upstream pyzmq * New feature: Add zipkin tracing to eventlet * db_pool: proxy Connection.set_isolation_level() * green.zmq: support RCVTIMEO (receive timeout) * green.profile: Python3 compatibility; Thanks to Artur Stawiarski * support: upgrade bundled six to 1.10 (dbfbfc818e3d) * python3.6: http.client.request support chunked_encoding 0.20.1 ====== * dns: try unqualified queries as top level * test_import_patched_defaults bended to play with pyopenssl>=16.1.0 * Explicit environ flag for importing eventlet.__version__ without ignoring import errors * Type check Semaphore, GreenPool arguments; Thanks to Matthew D. 
Pagel 0.20.0 ====== * IMPORTANT: removed select.poll() function * DNS resolving is always green with dnspython bundled in * greenio: only trampoline when we block * convenience: listen() sets SO_REUSEPORT when available; Thanks to Zhengwei Gao * ssl: Fix "TypeError: read() argument 2 must be read-write bytes-like object, not None" * greenio: _recv_loop behaviour with recv_into on closed sock * ipv6: getaddrinfo would fail with scope index * green.zmq: Support {send,recv}_{string,json,pyobj} wrappers * greendns: Return answers from /etc/hosts despite nameserver errors * patcher: fixed green existing locks fail (Python3) * Add DAGPool, a dependency-driven greenthread pool * wsgi: Unix socket address representation; Thanks to Samuel Merritt * tpool: isolate internal socket from default timeout; Thanks to Alex Villacís Lasso * wsgi: only skip Content-Type and Content-Length headers (GH-327) * wsgi: 400 on blank Content-Length headers (GH-334) * greenio: makefile related pypy socket ref counting * ssl: Fix recv_into blocking when reading chunks of data * websocket: support Gunicorn environ['gunicorn.socket'] 0.19.0 ====== * ssl: IMPORTANT DoS FIX do_handshake_connect=False in server accept(); Thanks to Garth Mollett * patcher: patch existing threading locks; Thanks to Alexis Lee * green.urllib2: missing patched ssl module; Thanks to Collin RM Stocks * wsgi: environ[headers_raw] tuple of unmodified name: value pairs * test against modern pyopenssl 16.0.0 for Python 2.7+; Thanks to Victor Stinner * wsgi: document compatibility with python `logging` * Minor grammatical improvements and typo fixes to the docs; Thanks to Steven Erenst 0.18.4 ====== * wsgi: change TCP_NODELAY to TCP_QUICKACK, ignore socket error when not available 0.18.3 ====== * wsgi: Use buffered writes - fixes partial socket.send without custom writelines(); Github issue #295 * wsgi: TCP_NODELAY enabled by default 0.18.2 ====== * wsgi: Fix data loss on partial writes (socket.send); Thanks to Jakub Stasiak 0.18.1 ====== * IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1 * patcher: Fix AttributeError in subprocess communicate() * greenio: Fix "TypeError: an integer is required" in sendto() 0.18.0 ====== * IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1 * greenio: Fixed a bug that could cause send() to start an endless loop on ENOTCONN; Thanks to Seyeong Kim * wsgi: Fixed UNIX socket address being trimmed in "wsgi starting" log; Thanks to Ihar Hrachyshka * ssl: Ported eventlet.green.OpenSSL to Python 3; Thanks to Victor Stinner * greenio: Made read() support buflen=-1 and added readall() (Python 3); Thanks to David Szotten * wsgi: Made the error raised in case of chunk read failures more precise (this should be backwards compatible as the new exception class, wsgi.ChunkReadError, is a subclass of ValueError which was being used there before); Thanks to Samuel Merritt * greenio: Fixed socket.recv() sometimes returning str instead of bytes on Python 3; Thanks to Janusz Harkot * wsgi: Improved request body discarding * websocket: Fixed TypeError on empty websocket message (Python 3); Thanks to Fukuchi Daisuke * subprocess: Fixed universal_newlines support * wsgi: Output of 0-byte chunks is now suppressed; Thanks to Samuel Merritt * Improved the documentation; Thanks to Ramakrishnan G, ashutosh-mishra and Azhar Hussain * greenio: Changed GreenFileIO.write() (Python 3) to always write all data to match the behavior on Python 2; Thanks to Victor Stinner * subprocess: Fixed missing subprocess.mswindows attribute on Python 3.5; Thanks to 
Josh VanderLinden * ssl/monkey patching: Fixed a bug that would cause merely importing eventlet to monkey patch the ssl module; Thanks to David Szotten * documentation: Added support for building plain text documentation; thanks to Levente Polyak * greenio: Fixed handling blocking IO errors in various GreenSocket methods; Thanks to Victor Stinner * greenio: Fixed GreenPipe ignoring the bufsize parameter on Python 2; Thanks to Phus Lu * backdoor: Added Unix and IPv6 socket support; Thanks to Eric Urban Backwards incompatible: * monkey patching: The following select methods and selector classes are now removed, instead of being left in their respective modules after patching even though they are not green (this also fixes HTTPServer.serve_forever() blocking whole process on Python 3): * select.poll * select.epoll * select.devpoll * select.kqueue * select.kevent * selectors.PollSelector * selectors.EpollSelector * selectors.DevpollSelector * selectors.KqueueSelector Additionally selectors.DefaultSelector points to a green SelectSelector * greenio: Fixed send() to no longer behave like sendall() which makes it consistent with Python standard library and removes a source of very subtle errors 0.17.4 ====== * ssl: incorrect initalization of default context; Thanks to stuart-mclaren 0.17.3 ====== * green.thread: Python3.3+ fixes; Thanks to Victor Stinner * Semaphore.acquire() accepts timeout=-1; Thanks to Victor Stinner 0.17.2 ====== * wsgi: Provide python logging compatibility; Thanks to Sean Dague * greendns: fix premature connection closing in DNS proxy; Thanks to Tim Simmons * greenio: correct fd close; Thanks to Antonio Cuni and Victor Sergeyev * green.ssl: HTTPS client Python 2.7.9+ compatibility * setup: tests.{isolated,manual} polluted top-level packages 0.17.1 ====== * greendns: fix dns.name import and Python3 compatibility 0.17 ==== * Full Python3 compatibility; Thanks to Jakub Stasiak * greendns: IPv6 support, improved handling of /etc/hosts; Thanks to Floris Bruynooghe * tpool: make sure we return results during killall; Thanks to David Szotten * semaphore: Don't hog a semaphore if someone else is waiting for it; Thanks to Shaun Stanworth * green.socket: create_connection() was wrapping all exceptions in socket.error; Thanks to Donagh McCabe * Make sure SSL retries are done using the exact same data buffer; Thanks to Lior Neudorfer * greenio: shutdown already closed sockets without error; Thanks to David Szotten 0.16.1 ====== * Wheel build 0.16.0 incorrectly shipped removed module eventlet.util. 0.16.0 ====== * Fix SSL socket wrapping and Python 2.7.9 compatibility; Thanks to Jakub Stasiak * Fix monkey_patch() on Python 3; Thanks to Victor Stinner * Fix "maximum recursion depth exceeded in GreenSocket.__del__"; Thanks to Jakub Stasiak * db_pool: BaseConnectionPool.clear updates .current_size #139; Thanks to Andrey Gubarev * Fix __str__ method on the TimeoutExpired exception class.; Thanks to Tomaz Muraus * hubs: drop Twisted support * Removed deprecated modules: api, most of coros, pool, proc, processes and util * Improved Python 3 compatibility (including patch by raylu); Thanks to Jakub Stasiak * Allow more graceful shutdown of wsgi server; Thanks to Stuart McLaren * wsgi.input: Make send_hundred_continue_headers() a public API; Thanks to Tushar Gohad * tpool: Windows compatibility, fix ResourceWarning. 
Thanks to Victor Stinner * tests: Fix timers not cleaned up on MySQL test skips; Thanks to Corey Wright 0.15.2 ====== * greenio: fixed memory leak, introduced in 0.15.1; Thanks to Michael Kerrin, Tushar Gohad * wsgi: Support optional headers w/ "100 Continue" responses; Thanks to Tushar Gohad 0.15.1 ====== * greenio: Fix second simultaneous read (parallel paramiko issue); Thanks to Jan Grant, Michael Kerrin * db_pool: customizable connection cleanup function; Thanks to Avery Fay 0.15 ==== * Python3 compatibility -- **not ready yet**; Thanks to Astrum Kuo, Davanum Srinivas, Jakub Stasiak, Victor Sergeyev * coros: remove Actor which was deprecated in 2010-01 * saranwrap: remove saranwrap which was deprecated in 2010-02 * PyPy compatibility fixes; Thanks to Dmitriy Kruglyak, Jakub Stasiak * green.profile: accumulate results between runs; Thanks to Zhang Hua * greenthread: add .unlink() method; Thanks to Astrum Kuo * packaging: Generate universal wheels; Thanks to Jakub Stasiak * queue: Make join not wait if there are no unfinished tasks; Thanks to Jakub Stasiak * tpool: proxy __enter__, __exit__ fixes Bitbucket-158; Thanks to Eric Urban * websockets: Add websockets13 support; handle lack of Upgrade header; Thanks to Edward George * wsgi: capitalize_response_headers option 0.14 ==== * wsgi: handle connection socket timeouts; Thanks to Paul Oppenheim * wsgi: close timed out client connections * greenio: socket pypy compatibility; Thanks to Alex Gaynor * wsgi: env['wsgi.input'] was returning 1 byte strings; Thanks to Eric Urban * green.ssl: fix NameError; Github #17; Thanks to Jakub Stasiak * websocket: allow "websocket" in lowercase in Upgrade header; Compatibility with current Google Chrome; Thanks to Dmitry Orlov * wsgi: allow minimum_chunk_size to be overriden on a per request basis; Thanks to David Goetz * wsgi: configurable socket_timeout 0.13 ==== * hubs: kqueue support! 
Thanks to YAMAMOTO Takashi, Edward George * greenio: Fix AttributeError on MacOSX; Bitbucket #136; Thanks to Derk Tegeler * green: subprocess: Fix subprocess.communicate() block on Python 2.7; Thanks to Edward George * green: select: ensure that hub can .wait() at least once before timeout; Thanks to YAMAMOTO Takashi * tpool: single request queue to avoid deadlocks; Bitbucket pull request 31,32; Thanks to Edward George * zmq: pyzmq 13.x compatibility; Thanks to Edward George * green: subprocess: Popen.wait() accepts new `timeout` kwarg; Python 3.3 and RHEL 6.1 compatibility * hubs: EVENTLET_HUB can point to external modules; Thanks to Edward George * semaphore: support timeout for acquire(); Thanks to Justin Patrin * support: do not clear sys.exc_info if can be preserved (greenlet >= 0.3.2); Thanks to Edward George * Travis continous integration; Thanks to Thomas Grainger, Jakub Stasiak * wsgi: minimum_chunk_size of last Server altered all previous (global variable); Thanks to Jakub Stasiak * doc: hubs: Point to the correct function in exception message; Thanks to Floris Bruynooghe 0.12 ==== * zmq: Fix 100% busy CPU in idle after .bind(PUB) (thanks to Geoff Salmon) * greenio: Fix socket.settimeout() did not switch back to blocking mode (thanks to Peter Skirko) * greenio: socket.dup() made excess fcntl syscalls (thanks to Peter Portante) * setup: Remove legacy --without-greenlet option and unused httplib2 dependency (thanks to Thomas Grainger) * wsgi: environ[REMOTE_PORT], also available in log_format, log accept event (thanks to Peter Portante) * tests: Support libzmq 3.0 SNDHWM option (thanks to Geoff Salmon) 0.11 ==== * ssl: Fix 100% busy CPU in socket.sendall() (thanks to Raymon Lu) * zmq: Return linger argument to Socket.close() (thanks to Eric Windisch) * tests: SSL tests were always skipped due to bug in skip_if_no_ssl decorator 0.10 ==== * greenio: Fix relative seek() (thanks to AlanP) * db_pool: Fix pool.put() TypeError with min_size > 1 (thanks to Jessica Qi) * greenthread: Prevent infinite recursion with linking to current greenthread (thanks to Edward George) * zmq: getsockopt(EVENTS) wakes correct threads (thanks to Eric Windisch) * wsgi: Handle client disconnect while sending response (thanks to Clay Gerrard) * hubs: Ensure that new hub greenlet is parent of old one (thanks to Edward George) * os: Fix waitpid() returning (0, 0) (thanks to Vishvananda Ishaya) * tpool: Add set_num_threads() method to set the number of tpool threads (thanks to David Ibarra) * threading, zmq: Fix Python 2.5 support (thanks to Floris Bruynooghe) * tests: tox configuration for all supported Python versions (thanks to Floris Bruynooghe) * tests: Fix zmq._QueueLock test in Python2.6 * tests: Fix patcher_test on Darwin (/bin/true issue) (thanks to Edward George) * tests: Skip SSL tests when not available (thanks to Floris Bruynooghe) * greenio: Remove deprecated GreenPipe.xreadlines() method, was broken anyway 0.9.17 ====== * ZeroMQ support calling send and recv from multiple greenthreads (thanks to Geoff Salmon) * SSL: unwrap() sends data, and so it needs trampolining (#104 thanks to Brandon Rhodes) * hubs.epolls: Fix imports for exception handler (#123 thanks to Johannes Erdfelt) * db_pool: Fix .clear() when min_size > 0 * db_pool: Add MySQL's insert_id() method (thanks to Peter Scott) * db_pool: Close connections after timeout, fix get-after-close race condition with using TpooledConnectionPool (thanks to Peter Scott) * threading monkey patch fixes (#115 thanks to Johannes Erdfelt) * pools: Better 
accounting of current_size in pools.Pool (#91 thanks to Brett Hoerner) * wsgi: environ['RAW_PATH_INFO'] with request path as received from client (thanks to dweimer) * wsgi: log_output flag (thanks to Juan Manuel Garcia) * wsgi: Limit HTTP header size (thanks to Gregory Holt) * wsgi: Configurable maximum URL length (thanks to Tomas Sedovic) 0.9.16 ====== * SO_REUSEADDR now correctly set. 0.9.15 ====== * ZeroMQ support without an explicit hub now implemented! Thanks to Zed Shaw for the patch. * zmq module supports the NOBLOCK flag, thanks to rfk. (#76) * eventlet.wsgi has a debug flag which can be set to false to not send tracebacks to the client (per redbo's request) * Recursive GreenPipe madness forestalled by Soren Hansen (#77) * eventlet.green.ssl no longer busywaits on send() * EEXIST ignored in epoll hub (#80) * eventlet.listen's behavior on Windows improved, thanks to Nick Vatamaniuc (#83) * Timeouts raised within tpool.execute are propagated back to the caller (thanks again to redbo for being the squeaky wheel) 0.9.14 ====== * Many fixes to the ZeroMQ hub, which now requires version 2.0.10 or later. Thanks to Ben Ford. * ZeroMQ hub no longer depends on pollhub, and thus works on Windows (thanks, Alexey Borzenkov) * Better handling of connect errors on Windows, thanks again to Alexey Borzenkov. * More-robust Event delivery, thanks to Malcolm Cleaton * wsgi.py now distinguishes between an empty query string ("") and a non-existent query string (no entry in environ). * wsgi.py handles ipv6 correctly (thanks, redbo) * Better behavior in tpool when you give it nonsensical numbers, thanks to R. Tyler for the nonsense. :) * Fixed importing on 2.5 (#73, thanks to Ruijun Luo) * Hub doesn't hold on to invalid fds (#74, thanks to Edward George) * Documentation for eventlet.green.zmq, courtesy of Ben Ford 0.9.13 ====== * ZeroMQ hub, and eventlet.green.zmq make supersockets green. Thanks to Ben Ford! * eventlet.green.MySQLdb added. It's an interface to MySQLdb that uses tpool to make it appear nonblocking * Greenthread affinity in tpool. Each greenthread is assigned to the same thread when using tpool, making it easier to work with non-thread-safe libraries. * Eventlet now depends on greenlet 0.3 or later. * Fixed a hang when using tpool during an import causes another import. Thanks to mikepk for tracking that down. * Improved websocket draft 76 compliance, thanks to Nick V. * Rare greenthread.kill() bug fixed, which was probably brought about by a bugfix in greenlet 0.3. * Easy_installing eventlet should no longer print an ImportError about greenlet * Support for serving up SSL websockets, thanks to chwagssd for reporting #62 * eventlet.wsgi properly sets 'wsgi.url_scheme' environment variable to 'https', and 'HTTPS' to 'on' if serving over ssl * Blocking detector uses setitimer on 2.6 or later, allowing for sub-second block detection, thanks to rtyler. * Blocking detector is documented now, too * socket.create_connection properly uses dnspython for nonblocking dns. Thanks to rtyler. * Removed EVENTLET_TPOOL_DNS, nobody liked that. But if you were using it, install dnspython instead. Thanks to pigmej and gholt. * Removed _main_wrapper from greenthread, thanks to Ambroff adding keyword arguments to switch() in 0.3! 0.9.12 ====== * Eventlet no longer uses the Twisted hub if Twisted is imported -- you must call eventlet.hubs.use_hub('twistedr') if you want to use it. This prevents strange race conditions for those who want to use both Twisted and Eventlet separately. 
* Removed circular import in twistedr.py * Added websocket multi-user chat example * Not using exec() in green modules anymore. * eventlet.green.socket now contains all attributes of the stdlib socket module, even those that were left out by bugs. * Eventlet.wsgi doesn't call print anymore, instead uses the logfiles for everything (it used to print exceptions in one place). * Eventlet.wsgi properly closes the connection when an error is raised * Better documentation on eventlet.event.Event.send_exception * Adding websocket.html to tarball so that you can run the examples without checking out the source 0.9.10 ====== * Greendns: if dnspython is installed, Eventlet will automatically use it to provide non-blocking DNS queries. Set the environment variable 'EVENTLET_NO_GREENDNS' if you don't want greendns but have dnspython installed. * Full test suite passes on Python 2.7. * Tests no longer depend on simplejson for >2.6. * Potential-bug fixes in patcher (thanks to Schmir, and thanks to Hudson) * Websockets work with query strings (thanks to mcarter) * WSGI posthooks that get called after the request completed (thanks to gholt, nice docs, too) * Blocking detector merged -- use it to detect places where your code is not yielding to the hub for > 1 second. * tpool.Proxy can wrap callables * Tweaked Timeout class to do something sensible when True is passed to the constructor 0.9.9 ===== * A fix for monkeypatching on systems with psycopg version 2.0.14. * Improved support for chunked transfers in wsgi, plus a bunch of tests from schmir (ported from gevent by redbo) * A fix for the twisted hub from Favo Yang 0.9.8 ===== * Support for psycopg2's asynchronous mode, from Daniele Varrazzo * websocket module is now part of core Eventlet with 100% unit test coverage thanks to Ben Ford. See its documentation at http://eventlet.net/doc/modules/websocket.html * Added wrap_ssl convenience method, meaning that we truly no longer need api or util modules. * Multiple-reader detection code protects against the common mistake of having multiple greenthreads read from the same socket at the same time, which can be overridden if you know what you're doing. * Cleaner monkey_patch API: the "all" keyword is no longer necessary. * Pool objects have a more convenient constructor -- no more need to subclass * amajorek's reimplementation of GreenPipe * Many bug fixes, major and minor. 
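A note on the wrap_ssl convenience method mentioned in the 0.9.8 entry above: the following is a minimal, illustrative sketch only (not part of the original changelog), assuming the top-level eventlet.listen() and eventlet.wrap_ssl() helpers and placeholder certificate paths:

    import eventlet

    # Illustrative only: address and certificate paths are placeholders.
    listener = eventlet.listen(('127.0.0.1', 8443))
    ssl_listener = eventlet.wrap_ssl(listener,
                                     certfile='server.crt',
                                     keyfile='server.key',
                                     server_side=True)
    # accept() blocks (cooperatively) until a client connects.
    client, addr = ssl_listener.accept()
    client.sendall(b'hello over TLS\n')
    client.close()
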
0.9.7 ===== * GreenPipe is now a context manager (thanks, quad) * tpool.Proxy supports iterators properly * bug fixes in eventlet.green.os (thanks, Benoit) * much code cleanup from Tavis * a few more example apps * multitudinous improvements in Py3k compatibility from amajorek 0.9.6 ===== * new EVENTLET_HUB environment variable allows you to select a hub without code * improved GreenSocket and GreenPipe compatibility with stdlib * bugfixes on GreenSocket and GreenPipe objects * code coverage increased across the board * Queue resizing * internal DeprecationWarnings largely eliminated * tpool is now reentrant (i.e., can call tpool.execute(tpool.execute(foo))) * more reliable access to unpatched modules reduces some race conditions when monkeypatching * completely threading-compatible corolocal implementation, plus tests and enthusiastic adoption * tests stomp on each others' toes less * performance improvements in timers, hubs, greenpool * Greenlet-aware profile module courtesy of CCP * support for select26 module's epoll * better PEP-8 compliance and import cleanup * new eventlet.serve convenience function for easy TCP servers 0.9.5 ===== * support psycopg in db_pool * smart patcher that does the right patching when importing without needing to understand plumbing of patched module * patcher.monkey_patch() method replacing util.wrap_* * monkeypatch threading support * removed api.named * imported timeout module from gevent, replace exc_after and with_timeout() * replace call_after with spawn_after; this is so that users don't see the Timer class * added cancel() method to GreenThread to support the semantic of "abort if not already in the middle of something" * eventlet.green.os with patched read() and write(), etc * moved stuff from wrap_pipes_with_coroutine_pipe into green.os * eventlet.green.subprocess instead of eventlet.processes * improve patching docs, explaining more about patcher and why you'd use eventlet.green * better documentation on greenpiles * deprecate api.py completely * deprecate util.py completely * deprecate saranwrap * performance improvements in the hubs * much better documentation overall * new convenience functions: eventlet.connect and eventlet.listen. Thanks, Sergey! 0.9.4 ===== * Deprecated coros.Queue and coros.Channel (use queue.Queue instead) * Added putting and getting methods to queue.Queue. * Added eventlet.green.Queue which is a greened clone of stdlib Queue, along with stdlib tests. * Changed __init__.py so that the version number is readable even if greenlet's not installed. * Bugfixes in wsgi, greenpool 0.9.3 ===== * Moved primary api module to __init__ from api. It shouldn't be necessary to import eventlet.api anymore; import eventlet should do the same job. * Proc module deprecated in favor of greenthread * New module greenthread, with new class GreenThread. * New GreenPool class that replaces pool.Pool. * Deprecated proc module (use greenthread module instead) * tpooled gethostbyname is configurable via environment variable EVENTLET_TPOOL_GETHOSTBYNAME * Removed greenio.Green_fileobject and refactored the code therein to be more efficient. Only call makefile() on sockets now; makeGreenFile() is deprecated. The main loss here is that of the readuntil method. Also, Green_fileobjects used to be auto-flushing; flush() must be called explicitly now. * Added epoll support * Improved documentation across the board. 
* New queue module, API-compatible with stdlib Queue * New debug module, used for enabling verbosity within Eventlet that can help debug applications or Eventlet itself. * Bugfixes in tpool, green.select, patcher * Deprecated coros.execute (use eventlet.spawn instead) * Deprecated coros.semaphore (use semaphore.Semaphore or semaphore.BoundedSemaphore instead) * Moved coros.BoundedSemaphore to semaphore.BoundedSemaphore * Moved coros.Semaphore to semaphore.Semaphore * Moved coros.event to event.Event * Deprecated api.tcp_listener, api.connect_tcp, api.ssl_listener * Moved get_hub, use_hub, get_default_hub from eventlet.api to eventlet.hubs * Renamed libevent hub to pyevent. * Removed previously-deprecated features tcp_server, GreenSSL, erpc, and trap_errors. * Removed saranwrap as an option for making db connections nonblocking in db_pool. 0.9.2 ===== * Bugfix for wsgi.py where it was improperly expecting the environ variable to be a constant when passed to the application. * Tpool.py now passes its tests on Windows. * Fixed minor performance issue in wsgi. 0.9.1 ===== * PyOpenSSL is no longer required for Python 2.6: use the eventlet.green.ssl module. 2.5 and 2.4 still require PyOpenSSL. * Cleaned up the eventlet.green packages and their associated tests, this should result in fewer version-dependent bugs with these modules. * PyOpenSSL is now fully wrapped in eventlet.green.OpenSSL; using it is therefore more consistent with using other green modules. * Documentation on using SSL added. * New green modules: ayncore, asynchat, SimpleHTTPServer, CGIHTTPServer, ftplib. * Fuller thread/threading compatibility: patching threadlocal with corolocal so coroutines behave even more like threads. * Improved Windows compatibility for tpool.py * With-statement compatibility for pools.Pool objects. * Refactored copyrights in the files, added LICENSE and AUTHORS files. * Added support for logging x-forwarded-for header in wsgi. * api.tcp_server is now deprecated, will be removed in a future release. * Added instructions on how to generate coverage reports to the documentation. * Renamed GreenFile to Green_fileobject, to better reflect its purpose. * Deprecated erpc method in tpool.py * Bug fixes in: wsgi.py, twistedr.py, poll.py, greenio.py, util.py, select.py, processes.py, selects.py 0.9.0 ===== * Full-duplex sockets (simultaneous readers and writers in the same process). * Remove modules that distract from the core mission of making it straightforward to write event-driven networking apps: httpd, httpc, channel, greenlib, httpdate, jsonhttp, logutil * Removed test dependency on sqlite, using nose instead. * Marked known-broken tests using nose's mechanism (most of these are not broken but are simply run in the incorrect context, such as threading-related tests that are incompatible with the libevent hub). * Remove copied code from python standard libs (in tests). * Added eventlet.patcher which can be used to import "greened" modules. 0.8.16 ====== * GreenSSLObject properly masks ZeroReturnErrors with an empty read; with unit test. * Fixed 2.6 SSL compatibility issue. 0.8.15 ====== * GreenSSL object no longer converts ZeroReturnErrors into empty reads, because that is more compatible with the underlying SSLConnection object. * Fixed issue caused by SIGCHLD handler in processes.py * Stopped supporting string exceptions in saranwrap and fixed a few test failures. 
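The 0.9.0 entry above introduces eventlet.patcher for importing "greened" modules. A minimal sketch of that usage follows; it is illustrative only, and the module chosen is an arbitrary stdlib example rather than anything the changelog prescribes:

    from eventlet import patcher

    # import_patched() re-imports a module so that the blocking modules it
    # depends on (socket, select, time, ...) are replaced with their
    # eventlet.green equivalents; 'ftplib' is just an arbitrary example here.
    ftplib = patcher.import_patched('ftplib')
    client = ftplib.FTP()  # behaves like stdlib ftplib, but cooperatively
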
0.8.14 ====== * Fixed some more Windows compatibility problems, resolving EVT-37 : http://jira.secondlife.com/browse/EVT-37 * waiting() method on Pool class, which was lost when the Pool implementation replaced CoroutinePool. 0.8.13 ====== * 2.6 SSL compatibility patch by Marcus Cavanaugh. * Added greenlet and pyopenssl as dependencies in setup.py. 0.8.12 ====== * The ability to resize() pools of coroutines, which was lost when the Pool implementation replaced CoroutinePool. * Fixed Cesar's issue with SSL connections, and furthermore did a complete overhaul of SSL handling in eventlet so that it's much closer to the behavior of the built-in libraries. In particular, users of GreenSSL sockets must now call shutdown() before close(), exactly like SSL.Connection objects. * A small patch that makes Eventlet work on Windows. This is the first release of Eventlet that works on Windows. 0.8.11 ====== Eventlet can now run on top of twisted reactor. Twisted-based hub is enabled automatically if twisted.internet.reactor is imported. It is also possible to "embed" eventlet into a twisted application via eventlet.twistedutil.join_reactor. See the examples for details. A new package, eventlet.twistedutil, is added that makes integration of twisted and eventlet easier. It has block_on function that allows to wait for a Deferred to fire and it wraps twisted's Protocol in a synchronous interface. This is similar to and is inspired by Christopher Armstrong's corotwine library. Thanks to Dan Pascu for reviewing the package. Another new package, eventlet.green, was added to provide some of the standard modules that are fixed not to block other greenlets. This is an alternative to monkey-patching the socket, which is impossible to do if you are running twisted reactor. The package includes socket, httplib, urllib2. Much of the core functionality has been refactored and cleaned up, including the removal of eventlet.greenlib. This means that it is now possible to use plain greenlets without modification in eventlet, and the subclasses of greenlet instead of the old eventlet.greenlib.GreenletContext. Calling eventlet.api.get_hub().switch() now checks to see whether the current greenlet has a "switch_out" method and calls it if so, providing the same functionality that the GreenletContext.swap_out used to. The swap_in behavior can be duplicated by overriding the switch method, and the finalize functionality can be duplicated by having a try: finally: block around the greenlet's main implementation. The eventlet.backdoor module has been ported to this new scheme, although it's signature had to change slightly so existing code that used the backdoor will have to be modified. A number of bugs related to improper scheduling of switch calls has been fixed. The fixed functions and classes include api.trampoline, api.sleep, coros.event, coros.semaphore, coros.queue. Many methods of greenio.GreenSocket were fixed to make its behavior more like that of a regular socket. Thanks to Marcin Bachry for fixing GreenSocket.dup to preserve the timeout. Added proc module which provides an easy way to subscribe to coroutine's results. This makes it easy to wait for a single greenlet or for a set of greenlets to complete. 
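The proc module described in the paragraph above was itself deprecated in 0.9.3 (see the notes further up), so as a purely illustrative sketch, here is the same "wait for a single greenlet or for a set of greenlets" idea expressed with the later greenthread/GreenPool API rather than with proc:

    import eventlet

    def work(n):
        eventlet.sleep(0.01)
        return n * 2

    # wait for a single greenthread's result
    gt = eventlet.spawn(work, 1)
    print(gt.wait())

    # wait for a whole set of greenthreads to complete
    pool = eventlet.GreenPool()
    for n in range(5):
        pool.spawn(work, n)
    pool.waitall()
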
wsgi.py now supports chunked transfer requests (patch by Mike Barton) The following modules were deprecated or removed because they were broken: hubs.nginx, hubs.libev, support.pycurls, support.twisteds, cancel method of coros.event class The following classes are still present but will be removed in the future version: - channel.channel (use coros.Channel) - coros.CoroutinePool (use pool.Pool) saranwrap.py now correctly closes the child process when the referring object is deleted, received some fixes to its detection of child process death, now correctly deals with the in keyword, and it is now possible to use coroutines in a non-blocking fashion in the child process. Time-based expiry added to db_pool. This adds the ability to expire connections both by idleness and also by total time open. There is also a connection timeout option. A small bug in httpd's error method was fixed. Python 2.3 is no longer supported. A number of tests was added along with a script to run all of them for all the configurations. The script generates an html page with the results. Thanks to Brian Brunswick for investigation of popen4 badness (eventlet.process) Thanks to Marcus Cavanaugh for pointing out some coros.queue(0) bugs. The twisted integration as well as many other improvements were funded by AG Projects (http://ag-projects.com), thanks! 0.8.x ===== Fix a CPU leak that would cause the poll hub to consume 100% CPU in certain conditions, for example the echoserver example. (Donovan Preston) Fix the libev hub to match libev's callback signature. (Patch by grugq) Add a backlog argument to api.tcp_listener (Patch by grugq) 0.7.x ===== Fix a major memory leak when using the libevent or libev hubs. Timers were not being removed from the hub after they fired. (Thanks Agusto Becciu and the grugq). Also, make it possible to call wrap_socket_with_coroutine_socket without using the threadpool to make dns operations non-blocking (Thanks the grugq). It's now possible to use eventlet's SSL client to talk to eventlet's SSL server. (Thanks to Ryan Williams) Fixed a major CPU leak when using select hub. When adding a descriptor to the hub, entries were made in all three dictionaries, readers, writers, and exc, even if the callback is None. Thus every fd would be passed into all three lists when calling select regardless of whether there was a callback for that event or not. When reading the next request out of a keepalive socket, the socket would come back as ready for writing, the hub would notice the callback is None and ignore it, and then loop as fast as possible consuming CPU. 0.6.x ===== Fixes some long-standing bugs where sometimes failures in accept() or connect() would cause the coroutine that was waiting to be double-resumed, most often resulting in SwitchingToDeadGreenlet exceptions as well as weird tuple-unpacking exceptions in the CoroutinePool main loop. 0.6.1: Added eventlet.tpool.killall. Blocks until all of the threadpool threads have been told to exit and join()ed. Meant to be used to clean up the threadpool on exit or if calling execv. Used by Spawning. 0.5.x ===== "The Pycon 2008 Refactor": The first release which incorporates libevent support. Also comes with significant refactoring and code cleanup, especially to the eventlet.wsgi http server. 
Docstring coverage is much higher and there is new extensive documentation: http://wiki.secondlife.com/wiki/Eventlet/Documentation The point releases of 0.5.x fixed some bugs in the wsgi server, most notably handling of Transfer-Encoding: chunked; previously, it would happily send chunked encoding to clients which asked for HTTP/1.0, which isn't legal. 0.2 ===== Initial re-release of forked linden branch. eventlet-0.30.2/PKG-INFO0000644000076500000240000001100714017673044015156 0ustar temotostaff00000000000000Metadata-Version: 1.1 Name: eventlet Version: 0.30.2 Summary: Highly concurrent networking library Home-page: http://eventlet.net Author: Linden Lab Author-email: eventletdev@lists.secondlife.com License: UNKNOWN Description: Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it. It uses epoll or libevent for highly scalable non-blocking I/O. Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O. The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application. It's easy to get started using Eventlet, and easy to convert existing applications to use it. Start off by looking at the `examples`_, `common design patterns`_, and the list of `basic API primitives`_. .. _examples: http://eventlet.net/doc/examples.html .. _common design patterns: http://eventlet.net/doc/design_patterns.html .. _basic API primitives: http://eventlet.net/doc/basic_usage.html Quick Example =============== Here's something you can try right on the command line:: % python3 >>> import eventlet >>> from eventlet.green.urllib.request import urlopen >>> gt = eventlet.spawn(urlopen, 'http://eventlet.net') >>> gt2 = eventlet.spawn(urlopen, 'http://secondlife.com') >>> gt2.wait() >>> gt.wait() Getting Eventlet ================== The easiest way to get Eventlet is to use pip:: pip install -U eventlet To install latest development version once:: pip install -U https://github.com/eventlet/eventlet/archive/master.zip Building the Docs Locally ========================= To build a complete set of HTML documentation, you must have Sphinx, which can be found at http://sphinx.pocoo.org/ (or installed with `pip install Sphinx`):: cd doc make html The built html files can be found in doc/_build/html afterward. Twisted ======= Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time, now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration. If you have a project that uses Eventlet with Twisted, your options are: * use last working release eventlet==0.14 * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13, `EVENTLET_HUB` environment variable can point to external modules. * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project. Apologies for any inconvenience. Supported Python versions ========================= Currently CPython 2.7 and 3.4+ are supported, but **2.7 and 3.4 support is deprecated and will be removed in the future**, only CPython 3.5+ support will remain. Flair ===== .. image:: https://img.shields.io/pypi/v/eventlet :target: https://pypi.org/project/eventlet/ .. image:: https://travis-ci.org/eventlet/eventlet.svg?branch=master :target: https://travis-ci.org/eventlet/eventlet .. 
image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg :target: https://codecov.io/gh/eventlet/eventlet Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python Classifier: Topic :: Internet Classifier: Topic :: Software Development :: Libraries :: Python Modules eventlet-0.30.2/README.rst0000644000076500000240000000570214006212666015552 0ustar temotostaff00000000000000Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it. It uses epoll or libevent for highly scalable non-blocking I/O. Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O. The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application. It's easy to get started using Eventlet, and easy to convert existing applications to use it. Start off by looking at the `examples`_, `common design patterns`_, and the list of `basic API primitives`_. .. _examples: http://eventlet.net/doc/examples.html .. _common design patterns: http://eventlet.net/doc/design_patterns.html .. _basic API primitives: http://eventlet.net/doc/basic_usage.html Quick Example =============== Here's something you can try right on the command line:: % python3 >>> import eventlet >>> from eventlet.green.urllib.request import urlopen >>> gt = eventlet.spawn(urlopen, 'http://eventlet.net') >>> gt2 = eventlet.spawn(urlopen, 'http://secondlife.com') >>> gt2.wait() >>> gt.wait() Getting Eventlet ================== The easiest way to get Eventlet is to use pip:: pip install -U eventlet To install latest development version once:: pip install -U https://github.com/eventlet/eventlet/archive/master.zip Building the Docs Locally ========================= To build a complete set of HTML documentation, you must have Sphinx, which can be found at http://sphinx.pocoo.org/ (or installed with `pip install Sphinx`):: cd doc make html The built html files can be found in doc/_build/html afterward. Twisted ======= Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time, now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration. If you have a project that uses Eventlet with Twisted, your options are: * use last working release eventlet==0.14 * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13, `EVENTLET_HUB` environment variable can point to external modules. * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project. Apologies for any inconvenience. Supported Python versions ========================= Currently CPython 2.7 and 3.4+ are supported, but **2.7 and 3.4 support is deprecated and will be removed in the future**, only CPython 3.5+ support will remain. 
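One common way to act on the "easy to convert existing applications" claim above is monkey patching. The sketch below is illustrative only and is not part of the original README; it assumes Python 3 and network access::

    import eventlet
    eventlet.monkey_patch()   # swap stdlib socket, select, time, ... for green versions

    import urllib.request     # imported after patching, so it uses green sockets

    def fetch(url):
        return urllib.request.urlopen(url).read()

    pool = eventlet.GreenPool(size=20)
    for body in pool.imap(fetch, ['http://eventlet.net'] * 3):
        print(len(body))

monkey_patch() is generally called as early as possible, before the modules it patches are imported elsewhere in the application.
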
Flair ===== .. image:: https://img.shields.io/pypi/v/eventlet :target: https://pypi.org/project/eventlet/ .. image:: https://travis-ci.org/eventlet/eventlet.svg?branch=master :target: https://travis-ci.org/eventlet/eventlet .. image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg :target: https://codecov.io/gh/eventlet/eventlet eventlet-0.30.2/benchmarks/0000755000076500000240000000000014017673043016176 5ustar temotostaff00000000000000eventlet-0.30.2/benchmarks/__init__.py0000644000076500000240000001561114006212666020311 0ustar temotostaff00000000000000from __future__ import print_function import argparse import gc import importlib import inspect import math import random import re import sys import timeit import eventlet import six # legacy, TODO convert context/localhost_socket benchmarks to new way def measure_best(repeat, iters, common_setup='pass', common_cleanup='pass', *funcs): funcs = list(funcs) results = dict((f, []) for f in funcs) for _ in range(repeat): random.shuffle(funcs) for func in funcs: gc.collect() t = timeit.Timer(func, setup=common_setup) results[func].append(t.timeit(iters)) common_cleanup() best_results = {} for func, times in six.iteritems(results): best_results[func] = min(times) return best_results class Benchmark: func = None name = '' iters = 0 ns_per_op = 0 allocs_per_op = 0 mb_per_s = 0 def __init__(self, **kwargs): for k, v in six.iteritems(kwargs): if not hasattr(self, k): raise AttributeError(k) setattr(self, k, v) def __str__(self): kvs = ', '.join('{}={}'.format(k, v) for k, v in six.iteritems(self.__dict__) if not k.startswith('_')) return 'Benchmark<{}>'.format(kvs) __repr__ = __str__ def format_result(self, name_pad_to=64): # format compatible with golang.org/x/tools/cmd/benchcmp return "Benchmark_{b.name}{pad}\t{b.iters}\t{b.ns_per_op} ns/op".format( b=self, pad=' ' * (name_pad_to + 1 - len(self.name))) def run(self, repeat=5): wrapper_time = _run_timeit(self.func, 0) times = [] for _ in range(repeat): t = _run_timeit(self.func, self.iters) if t == 0.0: raise Exception('{} time=0'.format(repr(self))) times.append(t) best_time = min(times) - wrapper_time self.ns_per_op = int((best_time * 1e9) / self.iters) def _run_timeit(func, number): # common setup gc.collect() manager = getattr(func, '_benchmark_manager', None) try: # TODO collect allocations count, memory usage # TODO collect custom MB/sec metric reported by benchmark if manager is not None: with manager(number) as ctx: return timeit.Timer(lambda: func(ctx)).timeit(number=number) else: return timeit.Timer(func).timeit(number=number) finally: # common cleanup eventlet.sleep(0.01) def optimal_iters(func, target_time): '''Find optimal number of iterations to run func closely >= target_time. ''' iters = 1 target_time = float(target_time) max_iters = int(getattr(func, '_benchmark_max_iters', 0)) # TODO automatically detect non-linear time growth scale_factor = getattr(func, '_benchmark_scale_factor', 0.0) for _ in range(10): if max_iters and iters > max_iters: return max_iters # print('try iters={iters}'.format(**locals())) t = _run_timeit(func, number=iters) # print('... 
t={t}'.format(**locals())) if t >= target_time: return iters if scale_factor: iters *= scale_factor continue # following assumes and works well for linear complexity target functions if t < (target_time / 2): # roughly target half optimal time, ensure iterations keep increasing iters = iters * (target_time / t / 2) + 1 # round up to nearest power of 10 iters = int(10 ** math.ceil(math.log10(iters))) elif t < target_time: # half/double dance is less prone to overshooting iterations iters *= 2 raise Exception('could not find optimal iterations for time={} func={}'.format(target_time, repr(func))) def collect(filter_fun): # running `python benchmarks/__init__.py` or `python -m benchmarks` # puts .../eventlet/benchmarks at top of sys.path, fix it to project root if sys.path[0].endswith('/benchmarks'): path = sys.path.pop(0) correct = path.rsplit('/', 1)[0] sys.path.insert(0, correct) common_prefix = 'benchmark_' result = [] # TODO step 1: put all toplevel benchmarking code under `if __name__ == '__main__'` # TODO step 2: auto import benchmarks/*.py, remove whitelist below # TODO step 3: convert existing benchmarks for name in ('hub_timers', 'spawn'): mod = importlib.import_module('benchmarks.' + name) for name, obj in inspect.getmembers(mod): if name.startswith(common_prefix) and inspect.isfunction(obj): useful_name = name[len(common_prefix):] if filter_fun(useful_name): result.append(Benchmark(name=useful_name, func=obj)) return result def noop(*a, **kw): pass def configure(manager=None, scale_factor=0.0, max_iters=0): def wrapper(func): func._benchmark_manager = manager func._benchmark_scale_factor = scale_factor func._benchmark_max_iters = max_iters return func return wrapper def main(): cmdline = argparse.ArgumentParser(description='Run benchmarks') cmdline.add_argument('-autotime', default=3.0, type=float, metavar='seconds', help='''autoscale iterations close to this time per benchmark, in seconds (default: %(default).1f)''') cmdline.add_argument('-collect', default=False, action='store_true', help='stop after collecting, useful for debugging this tool') cmdline.add_argument('-filter', default='', metavar='regex', help='process benchmarks matching regex (default: all)') cmdline.add_argument('-iters', default=None, type=int, metavar='int', help='force this number of iterations (default: auto)') cmdline.add_argument('-repeat', default=5, type=int, metavar='int', help='repeat each benchmark, report best result (default: %(default)d)') args = cmdline.parse_args() filter_re = re.compile(args.filter) bs = collect(filter_re.search) if args.filter and not bs: # TODO stderr print('error: no benchmarks matched by filter "{}"'.format(args.filter)) sys.exit(1) if args.collect: bs.sort(key=lambda b: b.name) print('\n'.join(b.name for b in bs)) return if not bs: raise Exception('no benchmarks to run') # execute in random order random.shuffle(bs) for b in bs: b.iters = args.iters or optimal_iters(b.func, target_time=args.autotime) b.run() # print results in alphabetic order max_name_len = max(len(b.name) for b in bs) bs.sort(key=lambda b: b.name) for b in bs: print(b.format_result(name_pad_to=max_name_len)) if __name__ == '__main__': try: main() except KeyboardInterrupt: sys.exit(1) eventlet-0.30.2/benchmarks/localhost_socket.py0000644000076500000240000000676014006212666022117 0ustar temotostaff00000000000000"""Benchmark evaluating eventlet's performance at speaking to itself over a localhost socket.""" from __future__ import print_function import time import benchmarks import six BYTES = 1000 SIZE = 1 
CONCURRENCY = 50 TRIES = 5 def reader(sock): expect = BYTES while expect > 0: d = sock.recv(min(expect, SIZE)) expect -= len(d) def writer(addr, socket_impl): sock = socket_impl(socket.AF_INET, socket.SOCK_STREAM) sock.connect(addr) sent = 0 while sent < BYTES: d = 'xy' * (max(min(SIZE / 2, BYTES - sent), 1)) sock.sendall(d) sent += len(d) def green_accepter(server_sock, pool): for i in six.moves.range(CONCURRENCY): sock, addr = server_sock.accept() pool.spawn_n(reader, sock) def heavy_accepter(server_sock, pool): for i in six.moves.range(CONCURRENCY): sock, addr = server_sock.accept() t = threading.Thread(None, reader, "reader thread", (sock,)) t.start() pool.append(t) import eventlet.green.socket import eventlet from eventlet import debug debug.hub_exceptions(True) def launch_green_threads(): pool = eventlet.GreenPool(CONCURRENCY * 2 + 1) server_sock = eventlet.green.socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_sock.bind(('localhost', 0)) server_sock.listen(50) addr = ('localhost', server_sock.getsockname()[1]) pool.spawn_n(green_accepter, server_sock, pool) for i in six.moves.range(CONCURRENCY): pool.spawn_n(writer, addr, eventlet.green.socket.socket) pool.waitall() import threading import socket def launch_heavy_threads(): threads = [] server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_sock.bind(('localhost', 0)) server_sock.listen(50) addr = ('localhost', server_sock.getsockname()[1]) accepter_thread = threading.Thread( None, heavy_accepter, "accepter thread", (server_sock, threads)) accepter_thread.start() threads.append(accepter_thread) for i in six.moves.range(CONCURRENCY): client_thread = threading.Thread(None, writer, "writer thread", (addr, socket.socket)) client_thread.start() threads.append(client_thread) for t in threads: t.join() if __name__ == "__main__": import optparse parser = optparse.OptionParser() parser.add_option('--compare-threading', action='store_true', dest='threading', default=False) parser.add_option('-b', '--bytes', type='int', dest='bytes', default=BYTES) parser.add_option('-s', '--size', type='int', dest='size', default=SIZE) parser.add_option('-c', '--concurrency', type='int', dest='concurrency', default=CONCURRENCY) parser.add_option('-t', '--tries', type='int', dest='tries', default=TRIES) opts, args = parser.parse_args() BYTES = opts.bytes SIZE = opts.size CONCURRENCY = opts.concurrency TRIES = opts.tries funcs = [launch_green_threads] if opts.threading: funcs = [launch_green_threads, launch_heavy_threads] results = benchmarks.measure_best(TRIES, 3, lambda: None, lambda: None, *funcs) print("green:", results[launch_green_threads]) if opts.threading: print("threads:", results[launch_heavy_threads]) print("%", (results[launch_green_threads] - results[launch_heavy_threads] ) / results[launch_heavy_threads] * 100) eventlet-0.30.2/benchmarks/spawn.py0000644000076500000240000000252514006212666017702 0ustar temotostaff00000000000000import contextlib import eventlet import benchmarks def dummy(i=None): return i def linked(gt, arg): return arg def benchmark_sleep(): eventlet.sleep() def benchmark_spawn_link1(): t = eventlet.spawn(dummy) t.link(linked, 1) t.wait() def benchmark_spawn_link5(): t = eventlet.spawn(dummy) t.link(linked, 1) t.link(linked, 2) t.link(linked, 3) t.link(linked, 4) t.link(linked, 5) t.wait() def benchmark_spawn_link5_unlink3(): t = eventlet.spawn(dummy) t.link(linked, 1) t.link(linked, 2) t.link(linked, 3) t.link(linked, 4) t.link(linked, 5) t.unlink(linked, 3) t.wait() @benchmarks.configure(max_iters=1e5) 
def benchmark_spawn_nowait(): eventlet.spawn(dummy, 1) def benchmark_spawn(): eventlet.spawn(dummy, 1).wait() @benchmarks.configure(max_iters=1e5) def benchmark_spawn_n(): eventlet.spawn_n(dummy, 1) @benchmarks.configure(max_iters=1e5) def benchmark_spawn_n_kw(): eventlet.spawn_n(dummy, i=1) @contextlib.contextmanager def pool_setup(iters): pool = eventlet.GreenPool(iters) yield pool pool.waitall() @benchmarks.configure(manager=pool_setup) def benchmark_pool_spawn(pool): pool.spawn(dummy, 1) @benchmarks.configure(manager=pool_setup, max_iters=1e5) def benchmark_pool_spawn_n(pool): pool.spawn_n(dummy, 1) eventlet-0.30.2/doc/0000755000076500000240000000000014017673043014626 5ustar temotostaff00000000000000eventlet-0.30.2/doc/Makefile0000644000076500000240000000655114006212666016273 0ustar temotostaff00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = PYTHONPATH=../:$(PYTHONPATH) sphinx-build PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean text html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " text to make text files" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to generate a docstring coverage report" clean: -rm -rf _build/* text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) _build/text @echo @echo "Build finished. The text files are in _build/text." html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html @echo @echo "Build finished. The HTML pages are in _build/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml @echo @echo "Build finished. The HTML pages are in _build/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in _build/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in _build/qthelp, like this:" @echo "# qcollectiongenerator _build/qthelp/Eventlet.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile _build/qthelp/Eventlet.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex @echo @echo "Build finished; the LaTeX files are in _build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes @echo @echo "The overview file is in _build/changes." 
linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in _build/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in _build/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) _build/coverage @echo "Coverage report finished, look at the " \ "results in _build/coverage/python.txt." eventlet-0.30.2/doc/authors.rst0000644000076500000240000000005014006212666017036 0ustar temotostaff00000000000000Authors ======= .. include:: ../AUTHORSeventlet-0.30.2/doc/basic_usage.rst0000644000076500000240000001330014006212666017620 0ustar temotostaff00000000000000Basic Usage ============= If it's your first time to Eventlet, you may find the illuminated examples in the :ref:`design-patterns` document to be a good starting point. Eventlet is built around the concept of green threads (i.e. coroutines, we use the terms interchangeably) that are launched to do network-related work. Green threads differ from normal threads in two main ways: * Green threads are so cheap they are nearly free. You do not have to conserve green threads like you would normal threads. In general, there will be at least one green thread per network connection. * Green threads cooperatively yield to each other instead of preemptively being scheduled. The major advantage from this behavior is that shared data structures don't need locks, because only if a yield is explicitly called can another green thread have access to the data structure. It is also possible to inspect primitives such as queues to see if they have any pending data. Primary API =========== The design goal for Eventlet's API is simplicity and readability. You should be able to read its code and understand what it's doing. Fewer lines of code are preferred over excessively clever implementations. `Like Python itself `_, there should be one, and only one obvious way to do it in Eventlet! Though Eventlet has many modules, much of the most-used stuff is accessible simply by doing ``import eventlet``. Here's a quick summary of the functionality available in the ``eventlet`` module, with links to more verbose documentation on each. Greenthread Spawn ----------------------- .. function:: eventlet.spawn(func, *args, **kw) This launches a greenthread to call *func*. Spawning off multiple greenthreads gets work done in parallel. The return value from ``spawn`` is a :class:`greenthread.GreenThread` object, which can be used to retrieve the return value of *func*. See :func:`spawn ` for more details. .. function:: eventlet.spawn_n(func, *args, **kw) The same as :func:`spawn`, but it's not possible to know how the function terminated (i.e. no return value or exceptions). This makes execution faster. See :func:`spawn_n ` for more details. .. function:: eventlet.spawn_after(seconds, func, *args, **kw) Spawns *func* after *seconds* have elapsed; a delayed version of :func:`spawn`. To abort the spawn and prevent *func* from being called, call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`. See :func:`spawn_after ` for more details. Greenthread Control ----------------------- .. function:: eventlet.sleep(seconds=0) Suspends the current greenthread and allows others a chance to process. See :func:`sleep ` for more details. .. class:: eventlet.GreenPool Pools control concurrency. 
It's very common in applications to want to consume only a finite amount of memory, or to restrict the amount of connections that one part of the code holds open so as to leave more for the rest, or to behave consistently in the face of unpredictable input data. GreenPools provide this control. See :class:`GreenPool ` for more on how to use these. .. class:: eventlet.GreenPile GreenPile objects represent chunks of work. In essence a GreenPile is an iterator that can be stuffed with work, and the results read out later. See :class:`GreenPile ` for more details. .. class:: eventlet.Queue Queues are a fundamental construct for communicating data between execution units. Eventlet's Queue class is used to communicate between greenthreads, and provides a bunch of useful features for doing that. See :class:`Queue ` for more details. .. class:: eventlet.Timeout This class is a way to add timeouts to anything. It raises *exception* in the current greenthread after *timeout* seconds. When *exception* is omitted or ``None``, the Timeout instance itself is raised. Timeout objects are context managers, and so can be used in with statements. See :class:`Timeout ` for more details. Patching Functions --------------------- .. function:: eventlet.import_patched(modulename, *additional_modules, **kw_additional_modules) Imports a module in a way that ensures that the module uses "green" versions of the standard library modules, so that everything works nonblockingly. The only required argument is the name of the module to be imported. For more information see :ref:`import-green`. .. function:: eventlet.monkey_patch(all=True, os=False, select=False, socket=False, thread=False, time=False) Globally patches certain system modules to be greenthread-friendly. The keyword arguments afford some control over which modules are patched. If *all* is True, then all modules are patched regardless of the other arguments. If it's False, then the rest of the keyword arguments control patching of specific subsections of the standard library. Most patch the single module of the same name (os, time, select). The exceptions are socket, which also patches the ssl module if present; and thread, which patches thread, threading, and Queue. It's safe to call monkey_patch multiple times. For more information see :ref:`monkey-patch`. Network Convenience Functions ------------------------------ .. autofunction:: eventlet.connect .. autofunction:: eventlet.listen .. autofunction:: eventlet.wrap_ssl .. autofunction:: eventlet.serve .. autoclass:: eventlet.StopServe These are the basic primitives of Eventlet; there are a lot more out there in the other Eventlet modules; check out the :doc:`modules`. eventlet-0.30.2/doc/common.txt0000644000076500000240000000021714006212666016655 0ustar temotostaff00000000000000.. |internal| replace:: This is considered an internal API, and it might change unexpectedly without being deprecated first. eventlet-0.30.2/doc/conf.py0000644000076500000240000001512214006212666016124 0ustar temotostaff00000000000000# -*- coding: utf-8 -*- # # Eventlet documentation build configuration file, created by # sphinx-quickstart on Sat Jul 4 19:48:27 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.intersphinx'] # If this is True, '.. todo::' and '.. todolist::' produce output, else they produce # nothing. The default is False. todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Eventlet' copyright = u'2005-2010, Eventlet Contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # import eventlet # The short X.Y version. version = '%s.%s' % (eventlet.version_info[0], eventlet.version_info[1]) # The full version, including alpha/beta/rc tags. release = eventlet.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # Intersphinx references intersphinx_mapping = {'http://docs.python.org/': None} # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'Eventletdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Eventlet.tex', u'Eventlet Documentation', u'', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True eventlet-0.30.2/doc/design_patterns.rst0000644000076500000240000001447214006212666020557 0ustar temotostaff00000000000000.. _design-patterns: Design Patterns ================= There are a bunch of basic patterns that Eventlet usage falls into. Here are a few examples that show their basic structure. Client Pattern -------------------- The canonical client-side example is a web crawler. This use case is given a list of urls and wants to retrieve their bodies for later processing. 
Here is a very simple example:: import eventlet from eventlet.green.urllib.request import urlopen urls = ["http://www.google.com/intl/en_ALL/images/logo.gif", "https://www.python.org/static/img/python-logo.png", "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"] def fetch(url): return urlopen(url).read() pool = eventlet.GreenPool() for body in pool.imap(fetch, urls): print("got body", len(body)) There is a slightly more complex version of this in the :ref:`web crawler example `. Here's a tour of the interesting lines in this crawler. ``from eventlet.green... import urlopen`` is how you import a cooperatively-yielding version of urllib. It is the same in all respects to the standard version, except that it uses green sockets for its communication. This is an example of the :ref:`import-green` pattern. ``pool = eventlet.GreenPool()`` constructs a :class:`GreenPool ` of a thousand green threads. Using a pool is good practice because it provides an upper limit on the amount of work that this crawler will be doing simultaneously, which comes in handy when the input data changes dramatically. ``for body in pool.imap(fetch, urls):`` iterates over the results of calling the fetch function in parallel. :meth:`imap ` makes the function calls in parallel, and the results are returned in the order that they were executed. The key aspect of the client pattern is that it involves collecting the results of each function call; the fact that each fetch is done concurrently is essentially an invisible optimization. Note also that imap is memory-bounded and won't consume gigabytes of memory if the list of urls grows to the tens of thousands (yes, we had that problem in production once!). Server Pattern -------------------- Here's a simple server-side example, a simple echo server:: import eventlet def handle(client): while True: c = client.recv(1) if not c: break client.sendall(c) server = eventlet.listen(('0.0.0.0', 6000)) pool = eventlet.GreenPool(10000) while True: new_sock, address = server.accept() pool.spawn_n(handle, new_sock) The file :ref:`echo server example ` contains a somewhat more robust and complex version of this example. ``server = eventlet.listen(('0.0.0.0', 6000))`` uses a convenience function to create a listening socket. ``pool = eventlet.GreenPool(10000)`` creates a pool of green threads that could handle ten thousand clients. ``pool.spawn_n(handle, new_sock)`` launches a green thread to handle the new client. The accept loop doesn't care about the return value of the ``handle`` function, so it uses :meth:`spawn_n `, instead of :meth:`spawn `. The difference between the server and the client patterns boils down to the fact that the server has a ``while`` loop calling ``accept()`` repeatedly, and that it hands off the client socket completely to the handle() method, rather than collecting the results. Dispatch Pattern ------------------- One common use case that Linden Lab runs into all the time is a "dispatch" design pattern. This is a server that is also a client of some other services. Proxies, aggregators, job workers, and so on are all terms that apply here. This is the use case that the :class:`GreenPile ` was designed for. Here's a somewhat contrived example: a server that receives POSTs from clients that contain a list of urls of RSS feeds. The server fetches all the feeds concurrently and responds with a list of their titles to the client. 
It's easy to imagine it doing something more complex than this, and this could be easily modified to become a Reader-style application:: import eventlet feedparser = eventlet.import_patched('feedparser') pool = eventlet.GreenPool() def fetch_title(url): d = feedparser.parse(url) return d.feed.get('title', '') def app(environ, start_response): pile = eventlet.GreenPile(pool) for url in environ['wsgi.input'].readlines(): pile.spawn(fetch_title, url) titles = '\n'.join(pile) start_response('200 OK', [('Content-type', 'text/plain')]) return [titles] The full version of this example is in the :ref:`feed_scraper_example`, which includes code to start the WSGI server on a particular port. This example uses a global (gasp) :class:`GreenPool ` to control concurrency. If we didn't have a global limit on the number of outgoing requests, then a client could cause the server to open tens of thousands of concurrent connections to external servers, thereby getting feedscraper's IP banned, or various other accidental-or-on-purpose bad behavior. The pool isn't a complete DoS protection, but it's the bare minimum. .. highlight:: python :linenothreshold: 1 The interesting lines are in the app function:: pile = eventlet.GreenPile(pool) for url in environ['wsgi.input'].readlines(): pile.spawn(fetch_title, url) titles = '\n'.join(pile) .. highlight:: python :linenothreshold: 1000 Note that in line 1, the Pile is constructed using the global pool as its argument. That ties the Pile's concurrency to the global's. If there are already 1000 concurrent fetches from other clients of feedscraper, this one will block until some of those complete. Limitations are good! Line 3 is just a spawn, but note that we don't store any return value from it. This is because the return value is kept in the Pile itself. This becomes evident in the next line... Line 4 is where we use the fact that the Pile is an iterator. Each element in the iterator is one of the return values from the fetch_title function, which are strings. We can use a normal Python idiom (:func:`join`) to concatenate these incrementally as they happen. eventlet-0.30.2/doc/environment.rst0000644000076500000240000000134214006212666017722 0ustar temotostaff00000000000000.. _env_vars: Environment Variables ====================== Eventlet's behavior can be controlled by a few environment variables. These are only for the advanced user. EVENTLET_HUB Used to force Eventlet to use the specified hub instead of the optimal one. See :ref:`understanding_hubs` for the list of acceptable hubs and what they mean (note that picking a hub not on the list will silently fail). Equivalent to calling :meth:`eventlet.hubs.use_hub` at the beginning of the program. EVENTLET_THREADPOOL_SIZE The size of the threadpool in :mod:`~eventlet.tpool`. This is an environment variable because tpool constructs its pool on first use, so any control of the pool size needs to happen before then. eventlet-0.30.2/doc/examples.rst0000644000076500000240000000474014006212666017201 0ustar temotostaff00000000000000Examples ======== Here are a bunch of small example programs that use Eventlet. All of these examples can be found in the ``examples`` directory of a source copy of Eventlet. .. _web_crawler_example: Web Crawler ------------ ``examples/webcrawler.py`` .. literalinclude:: ../examples/webcrawler.py .. _wsgi_server_example: WSGI Server ------------ ``examples/wsgi.py`` .. literalinclude:: ../examples/wsgi.py .. _echo_server_example: Echo Server ----------- ``examples/echoserver.py`` .. 
literalinclude:: ../examples/echoserver.py .. _socket_connect_example: Socket Connect -------------- ``examples/connect.py`` .. literalinclude:: ../examples/connect.py .. _chat_server_example: Multi-User Chat Server ----------------------- ``examples/chat_server.py`` This is a little different from the echo server, in that it broadcasts the messages to all participants, not just the sender. .. literalinclude:: ../examples/chat_server.py .. _feed_scraper_example: Feed Scraper ----------------------- ``examples/feedscraper.py`` This example requires `Feedparser `_ to be installed or on the PYTHONPATH. .. literalinclude:: ../examples/feedscraper.py .. _forwarder_example: Port Forwarder ----------------------- ``examples/forwarder.py`` .. literalinclude:: ../examples/forwarder.py .. _recursive_crawler_example: Recursive Web Crawler ----------------------------------------- ``examples/recursive_crawler.py`` This is an example recursive web crawler that fetches linked pages from a seed url. .. literalinclude:: ../examples/recursive_crawler.py .. _producer_consumer_example: Producer Consumer Web Crawler ----------------------------------------- ``examples/producer_consumer.py`` This is an example implementation of the producer/consumer pattern as well as being identical in functionality to the recursive web crawler. .. literalinclude:: ../examples/producer_consumer.py .. _websocket_example: Websocket Server Example -------------------------- ``examples/websocket.py`` This exercises some of the features of the websocket server implementation. .. literalinclude:: ../examples/websocket.py .. _websocket_chat_example: Websocket Multi-User Chat Example ----------------------------------- ``examples/websocket_chat.py`` This is a mashup of the websocket example and the multi-user chat example, showing how you can do the same sorts of things with websockets that you can do with regular sockets. .. literalinclude:: ../examples/websocket_chat.py eventlet-0.30.2/doc/history.rst0000644000076500000240000000302314006212666017055 0ustar temotostaff00000000000000History ------- Eventlet began life as Donovan Preston was talking to Bob Ippolito about coroutine-based non-blocking networking frameworks in Python. Most non-blocking frameworks require you to run the "main loop" in order to perform all network operations, but Donovan wondered if a library written using a trampolining style could get away with transparently running the main loop any time i/o was required, stopping the main loop once no more i/o was scheduled. Bob spent a few days during PyCon 2006 writing a proof-of-concept. He named it eventlet, after the coroutine implementation it used, `greenlet `_. Donovan began using eventlet as a light-weight network library for his spare-time project `Pavel `_, and also began writing some unittests. * http://svn.red-bean.com/bob/eventlet/trunk/ When Donovan started at Linden Lab in May of 2006, he added eventlet as an svn external in the ``indra/lib/python directory``, to be a dependency of the yet-to-be-named backbone project (at the time, it was named restserv). However, including eventlet as an svn external meant that any time the externally hosted project had hosting issues, Linden developers were not able to perform svn updates. Thus, the eventlet source was imported into the linden source tree at the same location, and became a fork. Bob Ippolito has ceased working on eventlet and has stated his desire for Linden to take it's fork forward to the open source world as "the" eventlet. 
eventlet-0.30.2/doc/hubs.rst0000644000076500000240000000721514006212666016324 0ustar temotostaff00000000000000.. _understanding_hubs: Understanding Eventlet Hubs =========================== A hub forms the basis of Eventlet's event loop, which dispatches I/O events and schedules greenthreads. It is the existence of the hub that promotes coroutines (which can be tricky to program with) into greenthreads (which are easy). Eventlet has multiple hub implementations, and when you start using it, it tries to select the best hub implementation for your system. The hubs that it supports are (in order of preference): **epolls** Linux. **poll** On platforms that support it **selects** Lowest-common-denominator, available everywhere. **pyevent** This is a libevent-based backend and is thus the fastest. It's disabled by default, because it does not support native threads, but you can enable it yourself if your use case doesn't require them. (You have to install pyevent, too.) If the selected hub is not ideal for the application, another can be selected. You can make the selection either with the environment variable :ref:`EVENTLET_HUB `, or with use_hub. .. function:: eventlet.hubs.use_hub(hub=None) Use this to control which hub Eventlet selects. Call it with the name of the desired hub module. Make sure to do this before the application starts doing any I/O! Calling use_hub completely eliminates the old hub, and any file descriptors or timers that it had been managing will be forgotten. Put the call as one of the first lines in the main module.:: """ This is the main module """ from eventlet import hubs hubs.use_hub("pyevent") Hubs are implemented as thread-local class instances. :func:`eventlet.hubs.use_hub` only operates on the current thread. When using multiple threads that each need their own hub, call :func:`eventlet.hubs.use_hub` at the beginning of each thread function that needs a specific hub. In practice, it may not be necessary to specify a hub in each thread; it works to use one special hub for the main thread, and let other threads use the default hub; this hybrid hub configuration will work fine. It is also possible to use a third-party hub module in place of one of the built-in ones. Simply pass the module itself to :func:`eventlet.hubs.use_hub`. The task of writing such a hub is a little beyond the scope of this document, it's probably a good idea to simply inspect the code of the existing hubs to see how they work.:: from eventlet import hubs from mypackage import myhub hubs.use_hub(myhub) Supplying None as the argument to :func:`eventlet.hubs.use_hub` causes it to select the default hub. How the Hubs Work ----------------- The hub has a main greenlet, MAINLOOP. When one of the running coroutines needs to do some I/O, it registers a listener with the hub (so that the hub knows when to wake it up again), and then switches to MAINLOOP (via ``get_hub().switch()``). If there are other coroutines that are ready to run, MAINLOOP switches to them, and when they complete or need to do more I/O, they switch back to the MAINLOOP. In this manner, MAINLOOP ensures that every coroutine gets scheduled when it has some work to do. MAINLOOP is launched only when the first I/O operation happens, and it is not the same greenlet that __main__ is running in. This lazy launching is why it's not necessary to explicitly call a dispatch() method like other frameworks, which in turn means that code can start using Eventlet without needing to be substantially restructured. 
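If you are unsure which hub was actually selected for the current thread, you can inspect it directly. This is a minimal sketch; the exact class reported depends on your platform and on any ``EVENTLET_HUB`` or :func:`eventlet.hubs.use_hub` override::

    from eventlet import hubs

    # get_hub() returns the hub instance for the current thread, creating
    # it with the default selection logic if one does not exist yet.
    hub = hubs.get_hub()
    print(type(hub))  # e.g. <class 'eventlet.hubs.epolls.Hub'> on Linux
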
More Hub-Related Functions --------------------------- .. autofunction:: eventlet.hubs.get_hub .. autofunction:: eventlet.hubs.get_default_hub .. autofunction:: eventlet.hubs.trampoline eventlet-0.30.2/doc/images/0000755000076500000240000000000014017673043016073 5ustar temotostaff00000000000000eventlet-0.30.2/doc/images/threading_illustration.png0000644000076500000240000010177214006212666023365 0ustar temotostaff00000000000000PNG  IHDRfWHiCCPICC Profilexy8lcٍ%5k"dd1PlɖB T*P(7V֊"꾯\yy>|9s3pS($8 B6;v£#`@ =)VVf_ѫclTA`wcmB 4~tsIJT[k]o@ tv:y}I"@C U 8f@@ ٥=)T }\JG#PӁbc tlҐ 'ؾ[o;GYi+zO} F^DmJ B/"_w큀&>@DN.F ʑUTCڗцɄCfIgc"9T8}q\#<";HwVa!'1}Zi]e2x,yBℒr*QSc_:z-4ÃFu&f~V7=ٰ㶗sut:|HKkѷn=6q>x~jDcJ$.~k 52D+T)L*\8w[$S2jɱ讘NQN88;ؗԜ\z.TL H7r5e](xwmphMIg}e 7E*+ͫoQnT\-SzM)-f?Oj=z::_tJ~֭5;>t__;wkJrn1ykcrcIHSSOgXfff'%79?x&m`i4h1@XyZ%D2 E`@223̲lqZqP9q/yw0Q'ΈI4KJJNNG>P}rʚz}\8(Jf ghmb|SA9u+!^k y[u;m{#kG.N#$װn'<=}}moHD SVrP0pK㑽QN<C?'ؙڳqIAɉ)URc^fpfZ^8Uwqz6.G"WzF>j5Bₒ2=7g*TUztuXB]t=zFQM-u:6Sgupt}U*c+uǛk}! YK|81(qӋYY\Ϯ_t~XZxMs?+kn7=HSp40,IDF.ۘ1Ln\V">vgl; `.gn};ywE8YDP"? cb=-e;%ä5w f,ʎʽW(PLCRRVPaUV}NkH&,j<ٗ&Js@Pm}. ZX}3>);F1&&=If~YP-,Z ְ<=ogk/dѡ1'#d Wkѫngcs~DıݤJ_x0%3H'+5'X3x4$6T ^m8[xjA@‰'cc\b87|:dz:I)Χ^MIџ9{velל+yW[ \k)l)j*/i(mZɛ+U,nTԴӺG/hd!h{˳Ͽvtwپ*^z}owC#z|2L~&0 uGv b[[n:l/ M…dx+|G\G#E.+! u5 P0CS MF#T>&~& %f-w)L8E%) kk/[* {V]pq\TYnoQ7;r0b^Yޝ~fČcC5`AOP:AR4 < Z_qWqI)s=ik$/]q͝ _)*/.zfbe-jZuGMyY=Wr>AѡcʓjSY܋߹VNV,[]Ɔ͝hn[!`hЀS7<qRyEI|QEu O-:=ŨTδ̬|ILhd|a5eɆa#g/r`c o99q\B\<ܗxxwH  Iħj.U$%wVH6J*##CkPXLDJjy{O4Q3IԢNչ[ף?o`$elfB51{b`)ley(Mm :`;88_8qU/ osDԱ=~ \g(!#gQ'NE/j:s{"!lO6>TL YYK.?Α˽?_qV] ,\rV}uf]{cB[j=Ryt/Ļ{HKpd֤/ җ+ūko76; dvve{=#<9#{D<Rƣyh2f>2<@CAӁ 9lmB~8(LH4dddжb?RC>2=$[ȷ@h@aG)f OOOmڥ{ЈďF1y$HtOAAA,%^"e)ǖ۷;~ΟwN =,,,""hK+_~իWFAb@[6 _Md"! dw/׬c"c6nZRe GG7-(98FF6&LH.8.W9&n߾={o)S okoDEGKߚΗ Y P[f@k ֨ejZRd,pR rM6}zÊe'رe65r,eOʠ )$$s|8eʔ9a>_H Ϛ 98(5uMV}W|oRIآl+L|`=_``xog.x х+U>n ӿxI3|dTT Tl5,ZtW@l_Uj!zf▟68}-^B<Η毽A%M R ibC >+,r8s6ژt Ӓf /y]G&pu'?qqBy2u:Va6\x!&.0"w?.Vtb,9E\QL=B[@4KFd&vD{q4i.Rze#5P.\hР/!yI~\1ɂN?әFAЎΗ{l{s<`-,h16^~J^H1"sF^\[i3Ԃ\; J=O=K'Wڶ 1㟭[5aDcGfF ꖘWd\̬f]~`/h_IYfZn;D@sS&1 k.tR$\@AdaSX"@Hh]=$KAz:kCFaF`ysX&*L4Ox: |HbV3K,Y:w *MaV>jĚWй~†LHTTX,e6~qaÇJuNHXf(u/Yf|⾬B(kQ w4ә%:,$u 1bxxxxhh(P )C?a^ }ώ IB cͿڽ2 ,'z9<SE2Ar]H%bRQ<߰Jqrce+ې=Iz8VڐS_WQM6|!b/ e`B\,ڵk yg,+ ۭ)r O7˃B)V~%b_DFJEhsGr!C=WSwz qKg_[7ъkSE,[6k(G,Y#zڔpD6TFz ?F+ ?%uF,恾Ib.mH̩i'.E6aM봦rX6ި S,؈},DMa Rnu0A  5T]ˀh|66,;^~*fV#GQ8D~pmFa ]F!-Yě&t-Û'^!4S@V^_l`dּt <_o #4ɑ0 /y EdX0Yb#a%E BC2/Ui(&XIb5rb:RrJ8 &H)PİIN/,T%R8 L2X Ќ,pY?ʴ^[ gMGh 僴$Ü sTPKĢ!6T6 %D+Qt}HT{]MbpS}Dl A=Ot)N1'̚3gN'z>R-X8*ѷbN~hDdYaLêQnO"*)1K׬ 9W)Ss-_ rLO{ Gd~ pXj ntsGoia\![jfk P~)N4h"kHZiy2)6,aܶ=yb'<ʳn;S|' S23qg$~${K(Th˷|;w.hЀExyʀVz[LE^}ɐʔ%|rg gPORewLl=KrV/z٣ ",t})֢,crP(4T5 H=c"N1Ĕhb_0/YiPh'Ɉ;x_,iU.pc6Og+Oc[ݯp=Uf[tVPsM1i/Tٗ_E~ҫMkeSw*ay2ˀqA>Օ+Wv?/YxFi^L\\\2U*(_eŕ-SdjrޣjL]߷>upe*pN:"ڙVO y`bWVvft -iZ˦ܟs_lS5^Ä JT'vR5YI/`H=6 GB)rĩ "Z$3$e 9abܽ{wdFCcv f ]kWަ۱ q R%ٸҵj;6a%g]9iJgЕS,_1k?fU33j-3 Vo;cZYV4r Jc}>]d ˉ{Έm"_`Mk׮RVjuXd&7,׆l)l0p[@V?N*Az!)Ɓ;Ɠ6{AKGl $ADfVWR*>tMV!@6g &-^svb09Ѡ+}D>Ο?ZO60)VpOmINlS~}·;6^F}p+Y*{wv{nav_H7MnOԿz |dįfB_#Y`C~-^4*_@\~ ~xys-/TpA&P3&,ЦD2OG3%uCzHCbsweGħ}NY_6و.2c8"@FTN1LOx5=1Ttjg!=ZFNrm@_6zDqRΈk\T^8L1YƟw|0hpTlOD6ʖpUÒ\Iϟlٌysd#;~^[kzUΒ[5?=ujN)#`tdC8H!2Hd4 "RsmϰyՋlb?̩G7;Ϋ^/e`7ɭZ˜noƏm׮cpp3Og xĬO~f% +`O'O;U#\)K›Smq)o6B6 Hڼe'Ol$sN9Ϯ { ܐ6O~ %& ~ ƺ a ,w+/]X-vɳg|jso—eq1>Mt:A1>ѨI:"xn>  Tʠ":ٲeK&Mq5bsW/{t2]h "DB?^ GNV@pdXr/o;g͟ӗsVrS- #|Mŵ>AW1ثmw\O yRceРw$:'m_Uk~XWlG_?ŕXƖ߉'K kͶBAtqkvۜ&ڞro8pm[+FƾXzؤw'߰xw\7rTUy/w??)-?y*֟|D%CLDނ?_4oOZeivquiaF{n{m@Ʀ`DDD-UO .7ˠW% F{Ӱ 3ɇ?(:]ʠC`GzͰڕRݷ=U)OdyJ%!_8_tW^MZne[Ԛ ~6WwHTw?USǸ> 
Ji.Xit⡄#Τ;./|pc=hsA>d36&>*wΟ/-?sԑCrGFv4Σ9HOr?XC7-YlY' .?O3bʐcSep << UijT穆hA\zU /N1avhtl؛&N746"\\,]g<8벭gJF,ybePF{/>)Ђ8\A. \aEj)gyӑE)PV6[{D+ Kd\; eN9+1._x~ړ?K\-}.3i0Οğĥ6zm{޳fYM8'+PIGOxԢ"4y"HU2S e#G=I->`MSeCvرxՊ :q {˶M3(x\Ɨ٪HاZ`$!j0iq7|ҥ󎝻Y[x^t?/๝ҟ9@zj0 t!EgرOi?ߋLI\81!ߵkc%6N?3 8::ZPhĵ2h\/edfC,[,dmҀ VH L3jbbb>-(=DZp?LBnHO^2g>g`ûӧU,hG'3҃TԠP08=ζA%8_oR\HsQ`zݙfOGjem8j^MRN'>ɻ]?%6vyE~m猁쪕x5q%!4DžGF"r :90eh.Mۨg*e^ ,ѯo;/{F@- 1:xEo?#mPq׀Ȳs+)$T!Bp[OqN.y.CӵSS>W, re;ӂ]Ng(/aUQރw= .>Am:pBtv>f` GݟV gHܟ |<8-F3-SQLM}dac|\`}ǼB{;GHB`hӾ0ơBQiQ~}1eYCuVxÇهܺuGϞ $+1E7: C`ݼɁ4 rAnQp3u&"Ň22&9ER_2JOt!TtG,MT[(_H4a!e˸u/@54nH7ޖM7MW;ֱG3{y;ۉx='\]ipsiFҽt{pώcF~ֻ%+^qTƟ~e:\z․_4Nvz"p[7mz\Ӧɓ'22<(^Y,f \$'';rNF̈xS,b JM̕ HS 7BBT!llp@0|9\ ]0^0zK%,go 8tnq1[๡  'gşԟ|HP2$]ܑu) Mg̞~3.ՏN@/iV;ϫQ< &m4@kiXE/L*zʠ `~MU&LQ92%9yY[JQ'͜Fv֭9iD9rg? v<T[;kծ6AuG1mx]Kic@,l̐qN[KtK([`A(.Ҍ3FG3b Nv蟿zQh6` o2=yG$OJɱ~#VL4ؖ`RM&Ljokg6Kv22B@k] $bQ݉%6ëgzz8V~bjNDӫ 1U *esG削Va?8 ,ߴڼ;d}0~4|fz<ѹ42РI]ʅ %~W;leIQ+G 7q?}T.3m\d@ghhN^UH)+*)2)M6TY:6kyKKvdY7x2 fY%~t2ߜ!fu/ %z7{5-U6Tt@b@d\p?̏r&r)k,+@G:TU_%銅 fʠɂfW+UQCޣM6@Ix_.8sNd\pۙ#rW<.w«TD1r-R+WiȳCAL4Me}e8N}5;ě&gcyY%*v G}9\0gvStY"*T@fJڵl@Q1P.L:lw[ESQ%l,)<+M\؉rrV!~_.CR.8K7eT޹OXe`,%"G ^mpΝ۷o~_Ozo9{e6;*'O'cl_^.X*-_.x +;rNxT.T(MN;h}έ1W.;sO_NZq1j|lT줉ʕ+w O"7'O63 4tcY9̎,k_.xNl.o++v\G>}Ƥ\8\ڞ&&BRjI n_RN)h3DDkR/Ofv fIr~[V*))v\\GY?/;Dղ(5(^iw8ELT VS3h7ZKz,Xlےl$ֵY4㾖V-GS8 ;fŘ78=urY~trj=I* 5N0cy˜P,1"VOl^Dr&{7BeB(ClVSB#;|$@OM <6@!jxbQF, eV_< d2 wH=4UY1@a JKKU[NlN,z7Nh!(@XVz2pTf:DMja mVH.O*2YhIh1`7Nm&FpLEָKƺ)4"",Ȱ@*W6drtDhut)B/OkJ)" 0e_PO,ppnz] =t" @ZU zwLYZuQazbp)mbx:ZZ55Z4ƒ)4"|-ODW>TR84ٱvzW"JT.##pDlfF RGwSIb)a_Ȅ(Up F8@5a: Mf*G+adXVK-@p 8bbB6U PBpL"+(j$vLa0Ẓ'|"Ba2 !xz &cfop %*F%H,JZd@?0#ok(Sy"MՐ"̼RpY+cUMoDEpfܭqdnն]GmuW˟HذI.Lz%۷M8pdgcۊ9VhqH9s r^W d8`!4GҶrRvxxFfʩe=,Sz%b StYReBV9$Czvc IDAT87_OH)R!) "i4נ$Ӆ_)b_lyS>e!IyS1Y8tOX\IH^o YS\YS{qam 8ދs4;n#thaXOy\64o J]u&dp]<% %пu31<, =!0i#000222AirfK>C{.wҟތ%o4un 965XEn`K7jinR5 B&F pU;)4⼁2D 1Ǡ;wn Vn RsԜqqqeb˔*bŕZ GCZ-bpynv&|Twl)VV6bٖZׅm+T*[ҋ]컖:_ي~:e]y ~lFZ,N_ J:Қ7#1-PaAAq-J`#0DhN"e{Y<1OݽvWع&*nW-mnӥtw]qB–+Zƈ);|8Q_/֭;m9)eHZnZn%%!;$Y)@YM +t20#`VAV(XHغpgvg'7d~ɥMQ$ֱ'vR[6E.[mI²w%4&Ҕh7"q;nY};I3<0;rThu.U1*%9/K4dfc:~m>~=I)~ؒËk5msN_Tج;)) vX;n+DlP)& 4Ex*""f 6YPY@{aA;w$^$ۥԧn.GeT]@mPS ۺmx"Y߹VϝZ=[ק| Ȼ}5CB?51Ё˟jyoi3}d-ky3ƾ o#@ (^z>wޝ;w'Nʻ^zmA8+AMдPCTc'L%QõYh l)N .[ě)Q1!6{!!OLo%]KhVKH-tUlftuy*kI;ʊ"LGys( /%,,Jm \דw)Vc:JI$ BbB mT ,D X*+KWk2I-A\/ק'y捎JDDD\BCC[5L(!(: g(;zF5RYRz5 dW1ؒT-1i>C!(aD7b2h[!X6N("' /P`U Tҵ0 ;x[2kfM;t[*wKfNݬS=ҪÇX3SO?r=*wKfi:pO !՗ˈ|#G,bBURpY@O,'rZnY:=o[EI.@\B 4Vfxŵ~ ]E,eS붝}zu'&_'ۯ휴~GOz`s'jVyƜu(Cޭ6!e& B k"~B yɝE` J D ݻgdWԹwsSq2t?ж O v_w?9rQ8ܨ5n+\0bP0h qA/~h fف3hs 1"v+#'%?z6"аlI$$#-}GI;T1|!"j_;;O*/wΉ׶B}L|sb+)ɝ}w9*TiL]Li:?M ~e3膔dɒ׮U0kv,"!Yvֽ7]yr&t!@%)醀9y~~p LmEc#fW*P[oq{v1bZM[o[k-m50_u|m_~ V7 9U@"rv5b{P QHٔ/n߾}fT JHncHW sC?UwȌ [ꭍ[/n Zü%sI*BmW8 87ܼZ>L!v&\ߴꭋV8{ܰq.vXeђ.\6aJ~l?Kçͬ3?,?ziFP#6qp;+,T)C1ˠxbG۰ :^z:`Oh G*omgڄ(j}[ݣZ[.݂ Ma&!),r<7ܲ7F޹~|hdM%t"3נ I$ j<),,[~P_[?sqa>.ߏh7C`gO ;~<)?Szs[P X+G#/%r,C JDg`d+Rȁ*vA2KnB )k8׵ɏF7"OlzCҌ ?mcO'哷V?0q֘ד.M|C̳b_8~n))ĥ ~pqkۻdKO?vOmj=dۭs{Ɏ#8͑FHj;~VcN}߱jT7}փ:ѲV㾚dAWYY!<աf!J+Z hy@!V3e b(Z+t, ʮvRͶyU?/UԠs"ީ ;޼t'ghl|.{ڃϗݶy%kb˕9I <0=$l26Jw%Ԡ֦5/}Hkt5w DȅM ~[޴ɏL(jnX勵P$9K7ؾڏ|kgJP%jthnD2wb@JhЬqX=*2̎E@3#8H#k׮}2_]b{".-,OF#}G`rz򍱛J|;0:^ q]mZ)Quc g4; L)ۖ'ttZh- ș ڭPT=qŻmyo7@9΀@`47/>vCӗ1r؍7{]j'sH7zTj?ݴ~#S=pdtz3LiR-.24e|!fwʵ PPj)N4+v9.܅|Ϩѥ'[<] >4*=WmW*m缽O6ea!V6$ܑav#'#0b 7EOglZ]E밻\v&jK=h vd\ e-)3L9<|Atsy1l盃'$7{o;Mݥ`>0PI20k/!ic/ƾY/ 6J.[t͆j{7//8|d >m^}϶h@d@҉O~Mbا+O?;ďKl"zisx\H拾]={M{<}wq :! 
CK*{ǟ&*܀tc\D rނNu%Rl ]ۜpXCw-TKD"Q%*N˶ l⅄%Kv>jU ߭8rJ~Xg,\HzCq^j4sef9j4r2۞;J*Bt0!HIzB!Sk|Q˖9zlY~mG.{㓾`Ͽ9g''}?s&pgD&S]NAsB p/`WO, zaB%bR|7)k0`ݫ'ot!xVr`B]38'rt@14<)TԝsWٸӹOsޢ_z m>]y~N94sG߶6 UGbʠ^ ݹSĬ!N4rʝka9] 6?㩌2?j._b_~~m˗eKg/\c~7 ǶK۹lָ eKeh6p Bƅ3gn1uh g Zb7ݍ by1-؄#͵YJL]AZFs=QԬI_͌td@G˳hY۽ܔKu%"Xc!\_H-XޥLjmpHI rbA=^ d9~OP9!*be L2 ȥ[J~^R]"@?hϺ^}s©cl0wNݠi΍k\ڄ& 1Uk2 qAg/PPFÇ$'2m9BwHݙ@rTfF]43'?*Ci'uΒ_}6 /hD$^<[~mB +i#ـw(\Or_("XزXX{ Z N1h֠ :*m~I6_YS\0NnQ̽c+o@s+" w\Yrc0S.X5 ]@fqJ+|AS5td YҬQf ';ӑJts(lQ_.U.xrLn8Jٖ}2NP-!e;sKyoF$h8[6ܧwo 0Hy/蘘=;=@A7 آrJ 9\M[ 9\,D Rgtt e0DFdAy d M6]2Ӵ=C/?{|)<1 'MB924ԼJkH2N:.¯70~զR?ك/7~}9A(|pK%PLm{ A->iv{WTs(SmvuH&P CSݑf2iA$;:XIr3~ʗ S;ψA8/v~4~˰x\9\N0xzJ)RS֭X)5b e}BqʠGe@Մw+-ZMD 16!g ~ 傧rr@sP`XgNc"k(eDJ~Z_Hį2 d ])e}ժز9_`6Ec74ظr /W.SN).ǔ \NZEhe}V-˜qM'RM 5ԅf&}ҹsyZt,n/aLrC Ul`yJPLӥ :eU d B$T١zm,X+̓ec) Eee傱@V+\ r9Wuiym6\pEb+].Fl$e`_.8/'2P.;%Bo\$\pr~j'_.ې|^ rFo;=(~(I3vV-J@"rt>㔁)e&r׊sx:ܚVС\}9\p]Z.tw& 4Pd(HqrQb 2hJ&+ (b+YddDoAQ60Uc W.x_A傇rsF=Ϻ@QmwD+bba,Xr . /=E a;x뭛7o&&&Wn}:Qr]S@P՘Hw\p(b/3|M(]\"(L7 |C4qI+4IDATXEbcjU(oLC"ts aj)芆b9?W^1xsXHx,JB+%q{}Ŋv\0wnyY&C.r;D8J^eP±ÇʠAY2S ,rL H)f A6GDDԯWqc,\35SDZXQ`7KH_8oxerV>*L?Q傕kň O֯h#f ,8 i!e d lM3V FseTvJy'r_}oIُ'I.qPPY% y`I-ϟ9ז? , az  XȦ hB YK'5Kݱ(K$mϦrC\q2r^JʾzlhCgOE\/]d L-RK:NvkXv…&UvގM|Թ3+˗.Q/޹G/g+WB.pkʹ~ٲfkW+A]:兖pdd˟+XR路_z}-** C\s YBʀYo['W['wޥ^YcwߕZܫM NRռb++kQ;XI ߕQ\x Gt9ڕ`R0]b"ו`B? ]N#B(ZB 4KLWN T!@‘'w $ˍuh+? ]^וX !Wʘ'QI-)eH0RFd8~S_4_F R}"^z+ \k&Є `QÆ/NrIݦ7Δ#DI0%oM ]Γya4O Sdc_o4W/]Zt b)]ˠ 4S_jMЍDkGŎk׿zCW(}*Lق!1'̏] X b"fMt`AH iܸ?ܷ/7߻#K uɰD'}ѣo\GNOKب?8Ўnސ*Y 6q6fwm vb@}J eVS!'?ALL( Ki) b+jSy4\:4hsHARDbT ).u,]Z1ETD =UvO4Qh|髫VeاrճXwOL("4)4PBE[oݲOk:*쌥S.ΉQґeLQG}}wj 2!!!mA3Q۔6DY7}}|;7.^}݇[RJ n|4K/ВqsB=w,i]KArOd:6+^ Bu"fY|Ox{9{gpp:]4(ح6ʀoD,4pn"UCœ9;vͭK:Tfg@]Rs!yR8Hr:L_,!gSZEGM9UPB_1ۜnk`'&b2a{MM;0W4Xka\ Ϟ>[=?B͒63{l) 0|̐Xx.K/׼":D-h *}K_N?ݰxJB1|hƍhZ!%8hrҤg~zf]T \is(ȅ9 *OZABVHb|lc$TwKC\6&9vͫKaǃi06|Ĩ2J)1BwL8O#aeK_INw9[E)82Q?`0~֠*~G9|{ߵzj| 6hFʐJ J>26(NzOv+H%˜`\:+Caų*`V,RXV8ŠeUرO:&931)϶mݼrJ|Py1JPY b)2$L\j܁*3N7t ̥ oy<NHO&St *T Q(D5ǎT)xmܼlĉU@A۷P&Q]I!裃\G;={Ty)(ҹQM ɥ3qBVHaU 6:.587:AGե*@$G~_k$ vO[2_ε dL|-h4esa``c}A.$Ύh` >4bY&BXTDxVK['R`^xn?~0q8)/`'(z 9h~3KƋA>x.;70vMZ/R 2Kg]`.Ѯe;ҥVBӥ^0< p4H AfG^9ƌUŧ8` K$ &o0,3#BPS,t?{ou9e(‰OGrHP`VTSZbUyd "TƑ fFcSwgxǶf]s͜9sƍS/$b,ʈRoxW ?؊Z 5 Aώև~1Vs.ty|m]`.]y"/ezbY :.-BX ؔqqY< G~ƭr/aH-(A AsLT"`k, o_ -- $()C92S| nZ2z8Kl2ɥЎKC1vVXagKz< "򀄨MTs1Q)O3xvd\ӫێ"} tNFZ}>"i9eDyj(/mi!z4A@C -)ԩSxo m=)w&,Tq3#p;?1cǠl{/k„ | rXCEcqFb˼ˀ!:z;yyFؐP h #wF2GUBE {vd!:Ihe058bȃTfY@FHd 'SpS;XQFK9*e@6xQ ;&AA#fb6QaI82G "_Xv\HA BTA-]#|2x_#Ǒf" he`1X<ɵa(r,` txGAj6drQe w8É#?Q1M`_)pFRb il`_ Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` eventlet-0.30.2/doc/modules/0000755000076500000240000000000014017673044016277 5ustar temotostaff00000000000000eventlet-0.30.2/doc/modules/backdoor.rst0000644000076500000240000000232714006212666020616 0ustar temotostaff00000000000000:mod:`backdoor` -- Python interactive interpreter within a running process =============================================================================== The backdoor module is convenient for inspecting the state of a long-running process. It supplies the normal Python interactive interpreter in a way that does not block the normal operation of the application. This can be useful for debugging, performance tuning, or simply learning about how things behave in situ. 
In the application, spawn a greenthread running backdoor_server on a listening socket:: eventlet.spawn(backdoor.backdoor_server, eventlet.listen(('localhost', 3000)), locals()) When this is running, the backdoor is accessible via telnet to the specified port. .. code-block:: sh $ telnet localhost 3000 (python version, build info) Type "help", "copyright", "credits" or "license" for more information. >>> import myapp >>> dir(myapp) ['__all__', '__doc__', '__name__', 'myfunc'] >>> The backdoor cooperatively yields to the rest of the application between commands, so on a running server continuously serving requests, you can observe the internal state changing between interpreter commands. .. automodule:: eventlet.backdoor :members: eventlet-0.30.2/doc/modules/corolocal.rst0000644000076500000240000000023214006212666021000 0ustar temotostaff00000000000000:mod:`corolocal` -- Coroutine local storage ============================================= .. automodule:: eventlet.corolocal :members: :undoc-members: eventlet-0.30.2/doc/modules/dagpool.rst0000644000076500000240000003636314006212666020466 0ustar temotostaff00000000000000:mod:`dagpool` -- Dependency-Driven Greenthreads ================================================ Rationale ********* The dagpool module provides the :class:`DAGPool ` class, which addresses situations in which the value produced by one greenthread might be consumed by several others -- while at the same time a consuming greenthread might depend on the output from several different greenthreads. If you have a tree with strict many-to-one dependencies -- each producer greenthread provides results to exactly one consumer, though a given consumer may depend on multiple producers -- that could be addressed by recursively constructing a :class:`GreenPool ` of producers for each consumer, then :meth:`waiting ` for all producers. If you have a tree with strict one-to-many dependencies -- each consumer greenthread depends on exactly one producer, though a given producer may provide results to multiple consumers -- that could be addressed by causing each producer to finish by launching a :class:`GreenPool ` of consumers. But when you have many-to-many dependencies, a tree doesn't suffice. This is known as a `Directed Acyclic Graph `_, or DAG. You might consider sorting the greenthreads into dependency order (`topological sort `_) and launching them in a GreenPool. But the concurrency of the GreenPool must be strictly constrained to ensure that no greenthread is launched before all its upstream producers have completed -- and the appropriate pool size is data-dependent. Only a pool of size 1 (serializing all the greenthreads) guarantees that a topological sort will produce correct results. Even if you do serialize all the greenthreads, how do you pass results from each producer to all its consumers, which might start at very different points in time? One answer is to associate each greenthread with a distinct key, and store its result in a common dict. Then each consumer greenthread can identify its direct upstream producers by their keys, and find their results in that dict. This is the essence of DAGPool. A DAGPool instance owns a dict, and stores greenthread results in that dict. You :meth:`spawn ` *all* greenthreads in the DAG, specifying for each its own key -- the key with which its result will be stored on completion -- plus the keys of the upstream producer greenthreads on whose results it directly depends. 
Keys need only be unique within the DAGPool instance; they need not be UUIDs. A key can be any type that can be used as a dict key. String keys make it easier to reason about a DAGPool's behavior, but are by no means required. The DAGPool passes to each greenthread an iterable of (key, value) pairs. The key in each pair is the key of one of the greenthread's specified upstream producers; the value is the value returned by that producer greenthread. Pairs are delivered in the order results become available; the consuming greenthread blocks until the next result can be delivered. Tutorial ******* Example ------- Consider a couple of programs in some compiled language that depend on a set of precompiled libraries. Suppose every such build requires as input the specific set of library builds on which it directly depends. :: a zlib | / | |/ | b c | /| | / | | / | |/ | d e We can't run the build for program d until we have the build results for both b and c. We can't run the build for library b until we have build results for a and zlib. We can, however, immediately run the builds for a and zlib. So we can use a DAGPool instance to spawn greenthreads running a function such as this: :: def builder(key, upstream): for libname, product in upstream: # ... configure build for 'key' to use 'product' for 'libname' # all upstream builds have completed # ... run build for 'key' return build_product_for_key :meth:`spawn ` all these greenthreads: :: pool = DAGPool() # the upstream producer keys passed to spawn() can be from any iterable, # including a generator pool.spawn("d", ("b", "c"), builder) pool.spawn("e", ["c"], builder) pool.spawn("b", ("a", "zlib"), builder) pool.spawn("c", ["zlib"], builder) pool.spawn("a", (), builder) As with :func:`eventlet.spawn() `, if you need to pass special build flags to some set of builds, these can be passed as either positional or keyword arguments: :: def builder(key, upstream, cflags="", linkflags=""): ... pool.spawn("d", ("b", "c"), builder, "-o2") pool.spawn("e", ["c"], builder, linkflags="-pie") However, if the arguments to each builder() call are uniform (as in the original example), you could alternatively build a dict of the dependencies and call :meth:`spawn_many() `: :: deps = dict(d=("b", "c"), e=["c"], b=("a", "zlib"), c=["zlib"], a=()) pool.spawn_many(deps, builder) From outside the DAGPool, you can obtain the results for d and e (or in fact for any of the build greenthreads) in any of several ways. :meth:`pool.waitall() ` waits until the last of the spawned greenthreads has completed, and returns a dict containing results for *all* of them: :: final = pool.waitall() print("for d: {0}".format(final["d"])) print("for e: {0}".format(final["e"])) waitall() is an alias for :meth:`wait() ` with no arguments: :: final = pool.wait() print("for d: {0}".format(final["d"])) print("for e: {0}".format(final["e"])) Or you can specifically wait for only the final programs: :: final = pool.wait(["d", "e"]) The returned dict will contain only the specified keys. The keys may be passed into wait() from any iterable, including a generator. 
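For instance, a generator expression works just as well as a list (a minimal sketch, reusing the ``pool`` from the example above): ::

    # lazily select only the final program builds
    final = pool.wait(key for key in ("d", "e"))
    dprog, eprog = final["d"], final["e"]
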
You can wait for any specified set of greenthreads; they need not be topologically last: :: # returns as soon as both a and zlib have returned results, regardless of # what else is still running leaves = pool.wait(["a", "zlib"]) Suppose you want to wait specifically for just *one* of the final programs: :: final = pool.wait(["d"]) dprog = final["d"] The above wait() call will return as soon as greenthread d returns a result -- regardless of whether greenthread e has finished. :meth:`__getitem()__ ` is shorthand for obtaining a single result: :: # waits until greenthread d returns its result dprog = pool["d"] In contrast, :meth:`get() ` returns immediately, whether or not a result is ready: :: # returns immediately if pool.get("d") is None: ... Of course, your greenthread might not include an explicit return statement and hence might implicitly return None. You might have to test some other value. :: # returns immediately if pool.get("d", "notdone") == "notdone": ... Suppose you want to process each of the final programs in some way (upload it?), but you don't want to have to wait until they've both finished. You don't have to poll get() calls -- use :meth:`wait_each() `: :: for key, result in pool.wait_each(["d", "e"]): # key will be d or e, in completion order # process result... As with :meth:`wait() `, if you omit the argument to wait_each(), it delivers results for all the greenthreads of which it's aware: :: for key, result in pool.wait_each(): # key will be a, zlib, b, c, d, e, in whatever order each completes # process its result... Introspection ------------- Let's say you have set up a :class:`DAGPool ` with the dependencies shown above. To your consternation, your :meth:`waitall() ` call does not return! The DAGPool instance is stuck! You could change waitall() to :meth:`wait_each() `, and print each key as it becomes available: :: for key, result in pool.wait_each(): print("got result for {0}".format(key)) # ... process ... Once the build for a has completed, this produces: :: got result for a and then stops. Hmm! You can check the number of :meth:`running ` greenthreads: :: >>> print(pool.running()) 4 and the number of :meth:`waiting ` greenthreads: :: >>> print(pool.waiting()) 4 It's often more informative to ask *which* greenthreads are :meth:`still running `: :: >>> print(pool.running_keys()) ('c', 'b', 'e', 'd') but in this case, we already know a has completed. We can ask for all available results: :: >>> print(pool.keys()) ('a',) >>> print(pool.items()) (('a', result_from_a),) The :meth:`keys() ` and :meth:`items() ` methods only return keys and items for which results are actually available, reflecting the underlying dict. But what's blocking the works? What are we :meth:`waiting for `? :: >>> print(pool.waiting_for("d")) set(['c', 'b']) (waiting_for()'s optional argument is a *single* key.) That doesn't help much yet... :: >>> print(pool.waiting_for("b")) set(['zlib']) >>> print(pool.waiting_for("zlib")) KeyError: 'zlib' Aha! We forgot to even include the zlib build when we were originally configuring this DAGPool! (For non-interactive use, it would be more informative to omit waiting_for()'s argument. This usage returns a dict indicating, for each greenthread key, which other keys it's waiting for.) 
:: from pprint import pprint pprint(pool.waiting_for()) {'b': set(['zlib']), 'c': set(['zlib']), 'd': set(['b', 'c']), 'e': set(['c'])} In this case, a reasonable fix would be to spawn the zlib greenthread: :: pool.spawn("zlib", (), builder) Even if this is the last method call on this DAGPool instance, it should unblock all the rest of the DAGPool greenthreads. Posting ------- If we happen to have zlib build results in hand already, though, we could instead :meth:`post() ` that result instead of rebuilding the library: :: pool.post("zlib", result_from_zlib) This, too, should unblock the rest of the DAGPool greenthreads. Preloading ---------- If rebuilding takes nontrivial realtime, it might be useful to record partial results, so that in case of interruption you can restart from where you left off rather than having to rebuild everything prior to that point. You could iteratively :meth:`post() ` those prior results into a new DAGPool instance; alternatively you can :meth:`preload ` the :class:`DAGPool ` from an existing dict: :: pool = DAGPool(dict(a=result_from_a, zlib=result_from_zlib)) Any DAGPool greenthreads that depend on either a or zlib can immediately consume those results. It also works to construct DAGPool with an iterable of (key, result) pairs. Exception Propagation --------------------- But what if we spawn a zlib build that fails? Suppose the zlib greenthread terminates with an exception? In that case none of b, c, d or e can proceed! Nor do we want to wait forever for them. :: dprog = pool["d"] eventlet.dagpool.PropagateError: PropagateError(d): PropagateError: PropagateError(c): PropagateError: PropagateError(zlib): OriginalError DAGPool provides a :class:`PropagateError ` exception specifically to wrap such failures. If a DAGPool greenthread terminates with an Exception subclass, the DAGPool wraps that exception in a PropagateError instance whose *key* attribute is the key of the failing greenthread and whose *exc* attribute is the exception that terminated it. This PropagateError is stored as the result from that greenthread. Attempting to consume the result from a greenthread for which a PropagateError was stored raises that PropagateError. :: pool["zlib"] eventlet.dagpool.PropagateError: PropagateError(zlib): OriginalError Thus, when greenthread c attempts to consume the result from zlib, the PropagateError for zlib is raised. Unless the builder function for greenthread c handles that PropagateError exception, that greenthread will itself terminate. That PropagateError will be wrapped in another PropagateError whose *key* attribute is c and whose *exc* attribute is the PropagateError for zlib. Similarly, when greenthread d attempts to consume the result from c, the PropagateError for c is raised. This in turn is wrapped in a PropagateError whose *key* is d and whose *exc* is the PropagateError for c. When someone attempts to consume the result from d, as shown above, the PropagateError for d is raised. You can programmatically chase the failure path to determine the original failure if desired: :: orig_err = err key = "unknown" while isinstance(orig_err, PropagateError): key = orig_err.key orig_err = orig_err.exc Scanning for Success / Exceptions --------------------------------- Exception propagation means that we neither perform useless builds nor wait for results that will never arrive. However, it does make it difficult to obtain *partial* results for builds that *did* succeed. 
For that you can call :meth:`wait_each_success() `: :: for key, result in pool.wait_each_success(): print("{0} succeeded".format(key)) # ... process result ... a succeeded Another problem is that although five different greenthreads failed in the example, we only see one chain of failures. You can enumerate the bad news with :meth:`wait_each_exception() `: :: for key, err in pool.wait_each_exception(): print("{0} failed with {1}".format(key, err.exc.__class__.__name__)) c failed with PropagateError b failed with PropagateError e failed with PropagateError d failed with PropagateError zlib failed with OriginalError wait_each_exception() yields each PropagateError wrapper as if it were the result, rather than raising it as an exception. Notice that we print :code:`err.exc.__class__.__name__` because :code:`err.__class__.__name__` is always PropagateError. Both wait_each_success() and wait_each_exception() can accept an iterable of keys to report: :: for key, result in pool.wait_each_success(["d", "e"]): print("{0} succeeded".format(key)) (no output) for key, err in pool.wait_each_exception(["d", "e"]): print("{0} failed with {1}".format(key, err.exc.__class__.__name__)) e failed with PropagateError d failed with PropagateError Both wait_each_success() and wait_each_exception() must wait until the greenthreads for all specified keys (or all keys) have terminated, one way or the other, because of course we can't know until then how to categorize each. Module Contents =============== .. automodule:: eventlet.dagpool :members: eventlet-0.30.2/doc/modules/db_pool.rst0000644000076500000240000001111614006212666020444 0ustar temotostaff00000000000000:mod:`db_pool` -- DBAPI 2 database connection pooling ======================================================== The db_pool module is useful for managing database connections. It provides three primary benefits: cooperative yielding during database operations, concurrency limiting to a database host, and connection reuse. db_pool is intended to be database-agnostic, compatible with any DB-API 2.0 database module. *It has currently been tested and used with both MySQLdb and psycopg2.* A ConnectionPool object represents a pool of connections open to a particular database. The arguments to the constructor include the database-software-specific module, the host name, and the credentials required for authentication. After construction, the ConnectionPool object decides when to create and sever connections with the target database. >>> import MySQLdb >>> cp = ConnectionPool(MySQLdb, host='localhost', user='root', passwd='') Once you have this pool object, you connect to the database by calling :meth:`~eventlet.db_pool.ConnectionPool.get` on it: >>> conn = cp.get() This call may either create a new connection, or reuse an existing open connection, depending on whether it has one open already or not. You can then use the connection object as normal. When done, you must return the connection to the pool: >>> conn = cp.get() >>> try: ... result = conn.cursor().execute('SELECT NOW()') ... finally: ... cp.put(conn) After you've returned a connection object to the pool, it becomes useless and will raise exceptions if any of its methods are called. Constructor Arguments ---------------------- In addition to the database credentials, there are a bunch of keyword constructor arguments to the ConnectionPool that are useful. * min_size, max_size : The normal Pool arguments. 
max_size is the most important constructor argument -- it determines the number of concurrent connections can be open to the destination database. min_size is not very useful. * max_idle : Connections are only allowed to remain unused in the pool for a limited amount of time. An asynchronous timer periodically wakes up and closes any connections in the pool that have been idle for longer than they are supposed to be. Without this parameter, the pool would tend to have a 'high-water mark', where the number of connections open at a given time corresponds to the peak historical demand. This number only has effect on the connections in the pool itself -- if you take a connection out of the pool, you can hold on to it for as long as you want. If this is set to 0, every connection is closed upon its return to the pool. * max_age : The lifespan of a connection. This works much like max_idle, but the timer is measured from the connection's creation time, and is tracked throughout the connection's life. This means that if you take a connection out of the pool and hold on to it for some lengthy operation that exceeds max_age, upon putting the connection back in to the pool, it will be closed. Like max_idle, max_age will not close connections that are taken out of the pool, and, if set to 0, will cause every connection to be closed when put back in the pool. * connect_timeout : How long to wait before raising an exception on connect(). If the database module's connect() method takes too long, it raises a ConnectTimeout exception from the get() method on the pool. DatabaseConnector ----------------- If you want to connect to multiple databases easily (and who doesn't), the DatabaseConnector is for you. It's a pool of pools, containing a ConnectionPool for every host you connect to. The constructor arguments are: * module : database module, e.g. MySQLdb. This is simply passed through to the ConnectionPool. * credentials : A dictionary, or dictionary-alike, mapping hostname to connection-argument-dictionary. This is used for the constructors of the ConnectionPool objects. Example: >>> dc = DatabaseConnector(MySQLdb, ... {'db.internal.example.com': {'user': 'internal', 'passwd': 's33kr1t'}, ... 'localhost': {'user': 'root', 'passwd': ''}}) If the credentials contain a host named 'default', then the value for 'default' is used whenever trying to connect to a host that has no explicit entry in the database. This is useful if there is some pool of hosts that share arguments. * conn_pool : The connection pool class to use. Defaults to db_pool.ConnectionPool. The rest of the arguments to the DatabaseConnector constructor are passed on to the ConnectionPool. *Caveat: The DatabaseConnector is a bit unfinished, it only suits a subset of use cases.* .. automodule:: eventlet.db_pool :members: :undoc-members: eventlet-0.30.2/doc/modules/debug.rst0000644000076500000240000000021314006212666020110 0ustar temotostaff00000000000000:mod:`debug` -- Debugging tools for Eventlet ================================================== .. automodule:: eventlet.debug :members: eventlet-0.30.2/doc/modules/event.rst0000644000076500000240000000021214006212666020142 0ustar temotostaff00000000000000:mod:`event` -- Cross-greenthread primitive ================================================== .. automodule:: eventlet.event :members: eventlet-0.30.2/doc/modules/greenpool.rst0000644000076500000240000000020014006212666021010 0ustar temotostaff00000000000000:mod:`greenpool` -- Green Thread Pools ======================================== .. 
automodule:: eventlet.greenpool :members: eventlet-0.30.2/doc/modules/greenthread.rst0000644000076500000240000000022614006212666021316 0ustar temotostaff00000000000000:mod:`greenthread` -- Green Thread Implementation ================================================== .. automodule:: eventlet.greenthread :members: eventlet-0.30.2/doc/modules/pools.rst0000644000076500000240000000020114006212666020153 0ustar temotostaff00000000000000:mod:`pools` - Generic pools of resources ========================================== .. automodule:: eventlet.pools :members: eventlet-0.30.2/doc/modules/queue.rst0000644000076500000240000000016014006212666020147 0ustar temotostaff00000000000000:mod:`queue` -- Queue class ======================================== .. automodule:: eventlet.queue :members: eventlet-0.30.2/doc/modules/semaphore.rst0000644000076500000240000000041614006212666021012 0ustar temotostaff00000000000000:mod:`semaphore` -- Semaphore classes ================================================== .. autoclass:: eventlet.semaphore.Semaphore :members: .. autoclass:: eventlet.semaphore.BoundedSemaphore :members: .. autoclass:: eventlet.semaphore.CappedSemaphore :members:eventlet-0.30.2/doc/modules/timeout.rst0000644000076500000240000000702014006212666020513 0ustar temotostaff00000000000000:mod:`timeout` -- Universal Timeouts ======================================== .. class:: eventlet.timeout.Timeout Raises *exception* in the current greenthread after *timeout* seconds:: timeout = Timeout(seconds, exception) try: ... # execution here is limited by timeout finally: timeout.cancel() When *exception* is omitted or is ``None``, the :class:`Timeout` instance itself is raised: >>> Timeout(0.1) >>> eventlet.sleep(0.2) Traceback (most recent call last): ... Timeout: 0.1 seconds You can use the ``with`` statement for additional convenience:: with Timeout(seconds, exception) as timeout: pass # ... code block ... This is equivalent to the try/finally block in the first example. There is an additional feature when using the ``with`` statement: if *exception* is ``False``, the timeout is still raised, but the with statement suppresses it, so the code outside the with-block won't see it:: data = None with Timeout(5, False): data = mysock.makefile().readline() if data is None: ... # 5 seconds passed without reading a line else: ... # a line was read within 5 seconds As a very special case, if *seconds* is None, the timer is not scheduled, and is only useful if you're planning to raise it directly. There are two Timeout caveats to be aware of: * If the code block in the try/finally or with-block never cooperatively yields, the timeout cannot be raised. In Eventlet, this should rarely be a problem, but be aware that you cannot time out CPU-only operations with this class. * If the code block catches and doesn't re-raise :class:`BaseException` (for example, with ``except:``), then it will catch the Timeout exception, and might not abort as intended. When catching timeouts, keep in mind that the one you catch may not be the one you set; if you plan on silencing a timeout, always check that it's the same instance that you set:: timeout = Timeout(1) try: ... except Timeout as t: if t is not timeout: raise # not my timeout .. automethod:: cancel .. autoattribute:: pending .. function:: eventlet.timeout.with_timeout(seconds, function, *args, **kwds) Wrap a call to some (yielding) function with a timeout; if the called function fails to return before the timeout, cancel it and return a flag value. 
:param seconds: seconds before timeout occurs :type seconds: int or float :param func: the callable to execute with a timeout; it must cooperatively yield, or else the timeout will not be able to trigger :param \*args: positional arguments to pass to *func* :param \*\*kwds: keyword arguments to pass to *func* :param timeout_value: value to return if timeout occurs (by default raises :class:`Timeout`) :rtype: Value returned by *func* if *func* returns before *seconds*, else *timeout_value* if provided, else raises :class:`Timeout`. :exception Timeout: if *func* times out and no ``timeout_value`` has been provided. :exception: Any exception raised by *func* Example:: data = with_timeout(30, urllib2.open, 'http://www.google.com/', timeout_value="") Here *data* is either the result of the ``get()`` call, or the empty string if it took too long to return. Any exception raised by the ``get()`` call is passed through to the caller. eventlet-0.30.2/doc/modules/websocket.rst0000644000076500000240000000225514006212666021020 0ustar temotostaff00000000000000:mod:`websocket` -- Websocket Server ===================================== This module provides a simple way to create a `websocket `_ server. It works with a few tweaks in the :mod:`~eventlet.wsgi` module that allow websockets to coexist with other WSGI applications. To create a websocket server, simply decorate a handler method with :class:`WebSocketWSGI` and use it as a wsgi application:: from eventlet import wsgi, websocket import eventlet @websocket.WebSocketWSGI def hello_world(ws): ws.send("hello world") wsgi.server(eventlet.listen(('', 8090)), hello_world) .. note:: Please see graceful termination warning in :func:`~eventlet.wsgi.server` documentation You can find a slightly more elaborate version of this code in the file ``examples/websocket.py``. As of version 0.9.13, eventlet.websocket supports SSL websockets; all that's necessary is to use an :ref:`SSL wsgi server `. .. note :: The web socket spec is still under development, and it will be necessary to change the way that this module works in response to spec changes. .. automodule:: eventlet.websocket :members: eventlet-0.30.2/doc/modules/wsgi.rst0000644000076500000240000001253714006212666020007 0ustar temotostaff00000000000000:mod:`wsgi` -- WSGI server =========================== The wsgi module provides a simple and easy way to start an event-driven `WSGI `_ server. This can serve as an embedded web server in an application, or as the basis for a more full-featured web server package. One such package is `Spawning `_. To launch a wsgi server, simply create a socket and call :func:`eventlet.wsgi.server` with it:: from eventlet import wsgi import eventlet def hello_world(env, start_response): start_response('200 OK', [('Content-Type', 'text/plain')]) return ['Hello, World!\r\n'] wsgi.server(eventlet.listen(('', 8090)), hello_world) You can find a slightly more elaborate version of this code in the file ``examples/wsgi.py``. .. automodule:: eventlet.wsgi :members: .. _wsgi_ssl: SSL --- Creating a secure server is only slightly more involved than the base example. All that's needed is to pass an SSL-wrapped socket to the :func:`~eventlet.wsgi.server` method:: wsgi.server(eventlet.wrap_ssl(eventlet.listen(('', 8090)), certfile='cert.crt', keyfile='private.key', server_side=True), hello_world) Applications can detect whether they are inside a secure server by the value of the ``env['wsgi.url_scheme']`` environment variable. 
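For example, a handler can branch on that value. Below is a minimal sketch that reuses the ``cert.crt`` and ``private.key`` files from the snippet above; the port number is arbitrary::

    from eventlet import wsgi
    import eventlet

    def hello_world(env, start_response):
        # 'https' when served through an SSL-wrapped listener, 'http' otherwise
        scheme = env['wsgi.url_scheme']
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Served over {0}\r\n'.format(scheme).encode()]

    wsgi.server(eventlet.wrap_ssl(eventlet.listen(('', 8443)),
                                  certfile='cert.crt',
                                  keyfile='private.key',
                                  server_side=True),
                hello_world)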
Non-Standard Extension to Support Post Hooks -------------------------------------------- Eventlet's WSGI server supports a non-standard extension to the WSGI specification where :samp:`env['eventlet.posthooks']` contains an array of `post hooks` that will be called after fully sending a response. Each post hook is a tuple of :samp:`(func, args, kwargs)` and the `func` will be called with the WSGI environment dictionary, followed by the `args` and then the `kwargs` from the post hook. For example:: from eventlet import wsgi import eventlet def hook(env, arg1, arg2, kwarg3=None, kwarg4=None): print('Hook called: %s %s %s %s %s' % (env, arg1, arg2, kwarg3, kwarg4)) def hello_world(env, start_response): env['eventlet.posthooks'].append( (hook, ('arg1', 'arg2'), {'kwarg3': 3, 'kwarg4': 4})) start_response('200 OK', [('Content-Type', 'text/plain')]) return ['Hello, World!\r\n'] wsgi.server(eventlet.listen(('', 8090)), hello_world) The above code will print the WSGI environment and the other passed function arguments for every request processed. Post hooks are useful when code needs to be executed after a response has been fully sent to the client (or when the client disconnects early). One example is more accurate logging of bandwidth used, since client disconnects use less bandwidth than the actual Content-Length. "100 Continue" Response Headers ------------------------------- Eventlet's WSGI server supports sending (optional) headers with HTTP "100 Continue" provisional responses. This is useful in cases where a WSGI server expects to complete a PUT request as a single HTTP request/response pair, and also wants to communicate back to the client as part of the same HTTP transaction. An example is where the HTTP server wants to pass hints back to the client about the characteristics of the data payload it can accept. For instance, the server may pass a hint in a header accompanying the "100 Continue" response indicating that it can or cannot accept encrypted data payloads, so that the client can make the encrypted-vs-unencrypted decision before starting to send the data. This works well for WSGI servers because the WSGI specification mandates the HTTP expect/continue mechanism (PEP 333). To define the "100 Continue" response headers, one may call :func:`set_hundred_continue_response_headers` on :samp:`env['wsgi.input']` as shown in the following example:: from eventlet import wsgi import eventlet def wsgi_app(env, start_response): # Define "100 Continue" response headers env['wsgi.input'].set_hundred_continue_response_headers( [('Hundred-Continue-Header-1', 'H1'), ('Hundred-Continue-Header-k', 'Hk')]) # The following read() causes the "100 Continue" response to be sent to # the client. Headers 'Hundred-Continue-Header-1' and # 'Hundred-Continue-Header-k' are sent with the response # following the "HTTP/1.1 100 Continue\r\n" status line text = env['wsgi.input'].read() start_response('200 OK', [('Content-Length', str(len(text)))]) return [text] You can find a more elaborate example in the file ``tests/wsgi_test.py``, :func:`test_024a_expect_100_continue_with_headers`. Per HTTP RFC 7231 (http://tools.ietf.org/html/rfc7231#section-6.2) a client is required to be able to process one or more "100 Continue" responses. A sample use case might be a user protocol where the server may want to use 100-continue responses to indicate to a client that it is working on a request and that the client should not time out. To support multiple 100-continue responses, the eventlet wsgi module exports the API :func:`send_hundred_continue_response`.
Sample use cases for chunked and non-chunked HTTP scenarios are included in the wsgi test case ``tests/wsgi_test.py``, :func:`test_024b_expect_100_continue_with_headers_multiple_chunked` and :func:`test_024c_expect_100_continue_with_headers_multiple_nonchunked`. eventlet-0.30.2/doc/modules/zmq.rst0000644000076500000240000000122214006212666017632 0ustar temotostaff00000000000000:mod:`eventlet.green.zmq` -- ØMQ support ======================================== :mod:`pyzmq ` [1]_ is a python binding to the C++ ØMQ [2]_ library written in Cython [3]_. :mod:`eventlet.green.zmq` is greenthread aware version of `pyzmq`. .. automodule:: eventlet.green.zmq :show-inheritance: .. currentmodule:: eventlet.green.zmq .. autoclass:: Context :show-inheritance: .. automethod:: socket .. autoclass:: Socket :show-inheritance: :inherited-members: .. automethod:: recv .. automethod:: send .. module:: zmq .. [1] http://github.com/zeromq/pyzmq .. [2] http://www.zeromq.com .. [3] http://www.cython.org eventlet-0.30.2/doc/modules.rst0000644000076500000240000000054214006212666017027 0ustar temotostaff00000000000000Module Reference ====================== .. toctree:: :maxdepth: 2 modules/backdoor modules/corolocal modules/dagpool modules/debug modules/db_pool modules/event modules/greenpool modules/greenthread modules/pools modules/queue modules/semaphore modules/timeout modules/websocket modules/wsgi modules/zmq eventlet-0.30.2/doc/patching.rst0000644000076500000240000001337514006212666017164 0ustar temotostaff00000000000000Greening The World ================== One of the challenges of writing a library like Eventlet is that the built-in networking libraries don't natively support the sort of cooperative yielding that we need. What we must do instead is patch standard library modules in certain key places so that they do cooperatively yield. We've in the past considered doing this automatically upon importing Eventlet, but have decided against that course of action because it is un-Pythonic to change the behavior of module A simply by importing module B. Therefore, the application using Eventlet must explicitly green the world for itself, using one or both of the convenient methods provided. .. _import-green: Import Green -------------- The first way of greening an application is to import networking-related libraries from the ``eventlet.green`` package. It contains libraries that have the same interfaces as common standard ones, but they are modified to behave well with green threads. Using this method is a good engineering practice, because the true dependencies are apparent in every file:: from eventlet.green import socket from eventlet.green import threading from eventlet.green import asyncore This works best if every library can be imported green in this manner. If ``eventlet.green`` lacks a module (for example, non-python-standard modules), then :func:`~eventlet.patcher.import_patched` function can come to the rescue. It is a replacement for the builtin import statement that greens any module on import. .. function:: eventlet.patcher.import_patched(module_name, *additional_modules, **kw_additional_modules) Imports a module in a greened manner, so that the module's use of networking libraries like socket will use Eventlet's green versions instead. 
The only required argument is the name of the module to be imported:: import eventlet httplib2 = eventlet.import_patched('httplib2') Under the hood, it works by temporarily swapping out the "normal" versions of the libraries in sys.modules for an eventlet.green equivalent. When the import of the to-be-patched module completes, the state of sys.modules is restored. Therefore, if the patched module contains the statement 'import socket', import_patched will have it reference eventlet.green.socket. One weakness of this approach is that it doesn't work for late binding (i.e. imports that happen during runtime). Late binding of imports is fortunately rarely done (it's slow and against `PEP-8 `_), so in most cases import_patched will work just fine. One other aspect of import_patched is the ability to specify exactly which modules are patched. Doing so may provide a slight performance benefit since only the needed modules are imported, whereas import_patched with no arguments imports a bunch of modules in case they're needed. The *additional_modules* and *kw_additional_modules* arguments are both sequences of name/module pairs. Either or both can be used:: from eventlet.green import socket from eventlet.green import SocketServer BaseHTTPServer = eventlet.import_patched('BaseHTTPServer', ('socket', socket), ('SocketServer', SocketServer)) BaseHTTPServer = eventlet.import_patched('BaseHTTPServer', socket=socket, SocketServer=SocketServer) .. _monkey-patch: Monkeypatching the Standard Library ---------------------------------------- The other way of greening an application is simply to monkeypatch the standard library. This has the disadvantage of appearing quite magical, but the advantage of avoiding the late-binding problem. .. function:: eventlet.patcher.monkey_patch(os=None, select=None, socket=None, thread=None, time=None, psycopg=None) This function monkeypatches the key system modules by replacing their key elements with green equivalents. If no arguments are specified, everything is patched:: import eventlet eventlet.monkey_patch() The keyword arguments afford some control over which modules are patched, in case that's important. Most patch the single module of the same name (e.g. time=True means that the time module is patched [time.sleep is patched by eventlet.sleep]). The exceptions to this rule are *socket*, which also patches the :mod:`ssl` module if present; and *thread*, which patches :mod:`thread`, :mod:`threading`, and :mod:`Queue`. Here's an example of using monkey_patch to patch only a few modules:: import eventlet eventlet.monkey_patch(socket=True, select=True) It is important to call :func:`~eventlet.patcher.monkey_patch` as early in the lifetime of the application as possible. Try to do it as one of the first lines in the main module. The reason for this is that sometimes there is a class that inherits from a class that needs to be greened -- e.g. a class that inherits from socket.socket -- and inheritance is done at import time, so therefore the monkeypatching should happen before the derived class is defined. It's safe to call monkey_patch multiple times. The psycopg monkeypatching relies on Daniele Varrazzo's green psycopg2 branch; see `the announcement `_ for more information. .. function:: eventlet.patcher.is_monkey_patched(module) Returns whether or not the specified module is currently monkeypatched. *module* can either be the module itself or the module's name. 
Based entirely off the name of the module, so if you import a module some other way than with the import keyword (including :func:`~eventlet.patcher.import_patched`), is_monkey_patched might not be correct about that particular module. eventlet-0.30.2/doc/ssl.rst0000644000076500000240000000532614006212666016165 0ustar temotostaff00000000000000Using SSL With Eventlet ======================== Eventlet makes it easy to use non-blocking SSL sockets. If you're using Python 2.7 or later, you're all set, eventlet wraps the built-in ssl module. In either case, the ``green`` modules handle SSL sockets transparently, just like their standard counterparts. As an example, :mod:`eventlet.green.urllib2` can be used to fetch https urls in as non-blocking a fashion as you please:: from eventlet.green.urllib.request import urlopen from eventlet import spawn bodies = [spawn(urlopen, url) for url in ("https://secondlife.com","https://google.com")] for b in bodies: print(b.wait().read()) PyOpenSSL ---------- :mod:`eventlet.green.OpenSSL` has exactly the same interface as pyOpenSSL_ `(docs) `_, and works in all versions of Python. This module is much more powerful than :func:`socket.ssl`, and may have some advantages over :mod:`ssl`, depending on your needs. For testing purpose first create self-signed certificate using following commands :: $ openssl genrsa 1024 > server.key $ openssl req -new -x509 -nodes -sha1 -days 365 -key server.key > server.cert Keep these Private key and Self-signed certificate in same directory as `server.py` and `client.py` for simplicity sake. Here's an example of a server (`server.py`) :: from eventlet.green import socket from eventlet.green.OpenSSL import SSL # insecure context, only for example purposes context = SSL.Context(SSL.SSLv23_METHOD) # Pass server's private key created context.use_privatekey_file('server.key') # Pass self-signed certificate created context.use_certificate_file('server.cert') # create underlying green socket and wrap it in ssl sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) connection = SSL.Connection(context, sock) # configure as server connection.set_accept_state() connection.bind(('127.0.0.1', 8443)) connection.listen(50) # accept one client connection then close up shop client_conn, addr = connection.accept() print(client_conn.read(100)) client_conn.shutdown() client_conn.close() connection.close() Here's an example of a client (`client.py`) :: import socket # Create socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Connect to server s.connect(('127.0.0.1', 8443)) sslSocket = socket.ssl(s) print repr(sslSocket.server()) print repr(sslSocket.issuer()) sslSocket.write('Hello secure socket\n') # Close client s.close() Running example:: In first terminal $ python server.py In another terminal $ python client.py .. _pyOpenSSL: https://launchpad.net/pyopenssl eventlet-0.30.2/doc/testing.rst0000644000076500000240000001075314006212666017041 0ustar temotostaff00000000000000Testing Eventlet ================ Eventlet is tested using `Nose `_. To run tests, simply install nose, and then, in the eventlet tree, do: .. code-block:: sh $ python setup.py test If you want access to all the nose plugins via command line, you can run: .. code-block:: sh $ python setup.py nosetests Lastly, you can just use nose directly if you want: .. code-block:: sh $ nosetests That's it! The output from running nose is the same as unittest's output, if the entire directory was one big test file. 
Many tests are skipped based on environmental factors; for example, it makes no sense to test kqueue-specific functionality when your OS does not support it. These are printed as S's during execution, and in the summary printed after the tests run it will tell you how many were skipped. Doctests -------- To run the doctests included in many of the eventlet modules, use this command: .. code-block :: sh $ nosetests --with-doctest eventlet/*.py Currently there are 16 doctests. Standard Library Tests ---------------------- Eventlet provides the ability to test itself with the standard Python networking tests. This verifies that the libraries it wraps work at least as well as the standard ones do. The directory tests/stdlib contains a bunch of stubs that import the standard lib tests from your system and run them. If you do not have any tests in your python distribution, they'll simply fail to import. There's a convenience module called all.py designed to handle the impedance mismatch between Nose and the standard tests: .. code-block:: sh $ nosetests tests/stdlib/all.py That will run all the tests, though the output will be a little weird because it will look like Nose is running about 20 tests, each of which consists of a bunch of sub-tests. Not all test modules are present in all versions of Python, so there will be an occasional printout of "Not importing %s, it doesn't exist in this installation/version of Python". If you see "Ran 0 tests in 0.001s", it means that your Python installation lacks its own tests. This is usually the case for Linux distributions. One way to get the missing tests is to download a source tarball (of the same version you have installed on your system!) and copy its Lib/test directory into the correct place on your PYTHONPATH. Testing Eventlet Hubs --------------------- When you run the tests, Eventlet will use the most appropriate hub for the current platform to do its dispatch. It's sometimes useful when making changes to Eventlet to test those changes on hubs other than the default. You can do this with the ``EVENTLET_HUB`` environment variable. .. code-block:: sh $ EVENTLET_HUB=epolls nosetests See :ref:`understanding_hubs` for the full list of hubs. Writing Tests ------------- What follows are some notes on writing tests, in no particular order. The filename convention when writing a test for module `foo` is to name the test `foo_test.py`. We don't yet have a convention for tests that are of finer granularity, but a sensible one might be `foo_class_test.py`. If you are writing a test that involves a client connecting to a spawned server, it is best to not use a hardcoded port because that makes it harder to parallelize tests. Instead bind the server to 0, and then look up its port when connecting the client, like this:: server_sock = eventlet.listener(('127.0.0.1', 0)) client_sock = eventlet.connect(('localhost', server_sock.getsockname()[1])) Coverage -------- Coverage.py is an awesome tool for evaluating how much code was exercised by unit tests. Nose supports it if both are installed, so it's easy to generate coverage reports for eventlet. Here's how: .. code-block:: sh nosetests --with-coverage --cover-package=eventlet After running the tests to completion, this will emit a huge wodge of module names and line numbers. For some reason, the ``--cover-inclusive`` option breaks everything rather than serving its purpose of limiting the coverage to the local files, so don't use that. 
The html option is quite useful because it generates nicely-formatted HTML files that are much easier to read than line-number soup. Here's a command that generates the annotation, dumping the html files into a directory called "cover": .. code-block:: sh coverage html -d cover --omit='tempmod,,tests' (``tempmod`` and ``console`` are omitted because they get thrown away at the completion of their unit tests and coverage.py isn't smart enough to detect this.) eventlet-0.30.2/doc/threading.rst0000644000076500000240000000320414006212666017322 0ustar temotostaff00000000000000Threads ======== Eventlet is thread-safe and can be used in conjunction with normal Python threads. The way this works is that coroutines are confined to their 'parent' Python thread. It's like each thread contains its own little world of coroutines that can switch between themselves but not between coroutines in other threads. .. image:: /images/threading_illustration.png You can only communicate cross-thread using the "real" thread primitives and pipes. Fortunately, there's little reason to use threads for concurrency when you're already using coroutines. The vast majority of the times you'll want to use threads are to wrap some operation that is not "green", such as a C library that uses its own OS calls to do socket operations. The :mod:`~eventlet.tpool` module is provided to make these uses simpler. The optional :ref:`pyevent hub ` is not compatible with threads. Tpool - Simple thread pool --------------------------- The simplest thing to do with :mod:`~eventlet.tpool` is to :func:`~eventlet.tpool.execute` a function with it. The function will be run in a random thread in the pool, while the calling coroutine blocks on its completion:: >>> import thread >>> from eventlet import tpool >>> def my_func(starting_ident): ... print("running in new thread:", starting_ident != thread.get_ident()) ... >>> tpool.execute(my_func, thread.get_ident()) running in new thread: True By default there are 20 threads in the pool, but you can configure this by setting the environment variable ``EVENTLET_THREADPOOL_SIZE`` to the desired pool size before importing tpool. .. automodule:: eventlet.tpool :members: eventlet-0.30.2/doc/zeromq.rst0000644000076500000240000000321614006212666016675 0ustar temotostaff00000000000000Zeromq ###### What is ØMQ? ============ "A ØMQ socket is what you get when you take a normal TCP socket, inject it with a mix of radioactive isotopes stolen from a secret Soviet atomic research project, bombard it with 1950-era cosmic rays, and put it into the hands of a drug-addled comic book author with a badly-disguised fetish for bulging muscles clad in spandex." Key differences to conventional sockets Generally speaking, conventional sockets present a synchronous interface to either connection-oriented reliable byte streams (SOCK_STREAM), or connection-less unreliable datagrams (SOCK_DGRAM). In comparison, 0MQ sockets present an abstraction of an asynchronous message queue, with the exact queueing semantics depending on the socket type in use. Where conventional sockets transfer streams of bytes or discrete datagrams, 0MQ sockets transfer discrete messages. 0MQ sockets being asynchronous means that the timings of the physical connection setup and teardown, reconnect and effective delivery are transparent to the user and organized by 0MQ itself. Further, messages may be queued in the event that a peer is unavailable to receive them. 
Conventional sockets allow only strict one-to-one (two peers), many-to-one (many clients, one server), or in some cases one-to-many (multicast) relationships. With the exception of ZMQ::PAIR, 0MQ sockets may be connected to multiple endpoints using connect(), while simultaneously accepting incoming connections from multiple endpoints bound to the socket using bind(), thus allowing many-to-many relationships. API documentation ================= ØMQ support is provided in the :mod:`eventlet.green.zmq` module. eventlet-0.30.2/eventlet/0000755000076500000240000000000014017673044015710 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/__init__.py0000644000076500000240000000464514017672610020030 0ustar temotostaff00000000000000import os import sys import warnings if sys.version_info < (3, 5): warnings.warn( "Support for your Python version is deprecated and will be removed in the future", DeprecationWarning, ) version_info = (0, 30, 2) __version__ = '.'.join(map(str, version_info)) # This is to make Debian packaging easier, it ignores import # errors of greenlet so that the packager can still at least # access the version. Also this makes easy_install a little quieter if os.environ.get('EVENTLET_IMPORT_VERSION_ONLY') != '1': from eventlet import convenience from eventlet import event from eventlet import greenpool from eventlet import greenthread from eventlet import patcher from eventlet import queue from eventlet import semaphore from eventlet import support from eventlet import timeout import greenlet # Force monotonic library search as early as possible. # Helpful when CPython < 3.5 on Linux blocked in `os.waitpid(-1)` before first use of hub. # Example: gunicorn # https://github.com/eventlet/eventlet/issues/401#issuecomment-327500352 try: import monotonic del monotonic except ImportError: pass connect = convenience.connect listen = convenience.listen serve = convenience.serve StopServe = convenience.StopServe wrap_ssl = convenience.wrap_ssl Event = event.Event GreenPool = greenpool.GreenPool GreenPile = greenpool.GreenPile sleep = greenthread.sleep spawn = greenthread.spawn spawn_n = greenthread.spawn_n spawn_after = greenthread.spawn_after kill = greenthread.kill import_patched = patcher.import_patched monkey_patch = patcher.monkey_patch Queue = queue.Queue Semaphore = semaphore.Semaphore CappedSemaphore = semaphore.CappedSemaphore BoundedSemaphore = semaphore.BoundedSemaphore Timeout = timeout.Timeout with_timeout = timeout.with_timeout wrap_is_timeout = timeout.wrap_is_timeout is_timeout = timeout.is_timeout getcurrent = greenlet.greenlet.getcurrent # deprecated TimeoutError, exc_after, call_after_global = ( support.wrap_deprecated(old, new)(fun) for old, new, fun in ( ('TimeoutError', 'Timeout', Timeout), ('exc_after', 'greenthread.exc_after', greenthread.exc_after), ('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global), )) del os eventlet-0.30.2/eventlet/backdoor.py0000644000076500000240000001004214006212666020040 0ustar temotostaff00000000000000from __future__ import print_function from code import InteractiveConsole import errno import socket import sys import errno import traceback import eventlet from eventlet import hubs from eventlet.support import greenlets, get_errno try: sys.ps1 except AttributeError: sys.ps1 = '>>> ' try: sys.ps2 except AttributeError: sys.ps2 = '... 
' class FileProxy(object): def __init__(self, f): self.f = f def isatty(self): return True def flush(self): pass def write(self, data, *a, **kw): try: self.f.write(data, *a, **kw) self.f.flush() except socket.error as e: if get_errno(e) != errno.EPIPE: raise def readline(self, *a): return self.f.readline(*a).replace('\r\n', '\n') def __getattr__(self, attr): return getattr(self.f, attr) # @@tavis: the `locals` args below mask the built-in function. Should # be renamed. class SocketConsole(greenlets.greenlet): def __init__(self, desc, hostport, locals): self.hostport = hostport self.locals = locals # mangle the socket self.desc = FileProxy(desc) greenlets.greenlet.__init__(self) def run(self): try: console = InteractiveConsole(self.locals) console.interact() finally: self.switch_out() self.finalize() def switch(self, *args, **kw): self.saved = sys.stdin, sys.stderr, sys.stdout sys.stdin = sys.stdout = sys.stderr = self.desc greenlets.greenlet.switch(self, *args, **kw) def switch_out(self): sys.stdin, sys.stderr, sys.stdout = self.saved def finalize(self): # restore the state of the socket self.desc = None if len(self.hostport) >= 2: host = self.hostport[0] port = self.hostport[1] print("backdoor closed to %s:%s" % (host, port,)) else: print('backdoor closed') def backdoor_server(sock, locals=None): """ Blocking function that runs a backdoor server on the socket *sock*, accepting connections and running backdoor consoles for each client that connects. The *locals* argument is a dictionary that will be included in the locals() of the interpreters. It can be convenient to stick important application variables in here. """ listening_on = sock.getsockname() if sock.family == socket.AF_INET: # Expand result to IP + port listening_on = '%s:%s' % listening_on elif sock.family == socket.AF_INET6: ip, port, _, _ = listening_on listening_on = '%s:%s' % (ip, port,) # No action needed if sock.family == socket.AF_UNIX print("backdoor server listening on %s" % (listening_on,)) try: while True: socketpair = None try: socketpair = sock.accept() backdoor(socketpair, locals) except socket.error as e: # Broken pipe means it was shutdown if get_errno(e) != errno.EPIPE: raise finally: if socketpair: socketpair[0].close() finally: sock.close() def backdoor(conn_info, locals=None): """Sets up an interactive console on a socket with a single connected client. This does not block the caller, as it spawns a new greenlet to handle the console. This is meant to be called from within an accept loop (such as backdoor_server). """ conn, addr = conn_info if conn.family == socket.AF_INET: host, port = addr print("backdoor to %s:%s" % (host, port)) elif conn.family == socket.AF_INET6: host, port, _, _ = addr print("backdoor to %s:%s" % (host, port)) else: print('backdoor opened') fl = conn.makefile("rw") console = SocketConsole(fl, addr, locals) hub = hubs.get_hub() hub.schedule_call_global(0, console.switch) if __name__ == '__main__': backdoor_server(eventlet.listen(('127.0.0.1', 9000)), {}) eventlet-0.30.2/eventlet/convenience.py0000644000076500000240000001600514006212666020555 0ustar temotostaff00000000000000import sys import warnings from eventlet import greenpool from eventlet import greenthread from eventlet import support from eventlet.green import socket from eventlet.support import greenlets as greenlet def connect(addr, family=socket.AF_INET, bind=None): """Convenience function for opening client sockets. :param addr: Address of the server to connect to. For TCP sockets, this is a (host, port) tuple. 
:param family: Socket family, optional. See :mod:`socket` documentation for available families. :param bind: Local address to bind to, optional. :return: The connected green socket object. """ sock = socket.socket(family, socket.SOCK_STREAM) if bind is not None: sock.bind(bind) sock.connect(addr) return sock class ReuseRandomPortWarning(Warning): pass class ReusePortUnavailableWarning(Warning): pass def listen(addr, family=socket.AF_INET, backlog=50, reuse_addr=True, reuse_port=None): """Convenience function for opening server sockets. This socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop. Sets SO_REUSEADDR on the socket to save on annoyance. :param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple. :param family: Socket family, optional. See :mod:`socket` documentation for available families. :param backlog: The maximum number of queued connections. Should be at least 1; the maximum value is system-dependent. :return: The listening green socket object. """ sock = socket.socket(family, socket.SOCK_STREAM) if reuse_addr and sys.platform[:3] != 'win': sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if family in (socket.AF_INET, socket.AF_INET6) and addr[1] == 0: if reuse_port: warnings.warn( '''listen on random port (0) with SO_REUSEPORT is dangerous. Double check your intent. Example problem: https://github.com/eventlet/eventlet/issues/411''', ReuseRandomPortWarning, stacklevel=3) elif reuse_port is None: reuse_port = True if reuse_port and hasattr(socket, 'SO_REUSEPORT'): # NOTE(zhengwei): linux kernel >= 3.9 try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # OSError is enough on Python 3+ except (OSError, socket.error) as ex: if support.get_errno(ex) in (22, 92): # A famous platform defines unsupported socket option. # https://github.com/eventlet/eventlet/issues/380 # https://github.com/eventlet/eventlet/issues/418 warnings.warn( '''socket.SO_REUSEPORT is defined but not supported. On Windows: known bug, wontfix. On other systems: please comment in the issue linked below. More information: https://github.com/eventlet/eventlet/issues/380''', ReusePortUnavailableWarning, stacklevel=3) sock.bind(addr) sock.listen(backlog) return sock class StopServe(Exception): """Exception class used for quitting :func:`~eventlet.serve` gracefully.""" pass def _stop_checker(t, server_gt, conn): try: try: t.wait() finally: conn.close() except greenlet.GreenletExit: pass except Exception: greenthread.kill(server_gt, *sys.exc_info()) def serve(sock, handle, concurrency=1000): """Runs a server on the supplied socket. Calls the function *handle* in a separate greenthread for every incoming client connection. *handle* takes two arguments: the client socket object, and the client address:: def myhandle(client_sock, client_addr): print("client connected", client_addr) eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle) Returning from *handle* closes the client socket. :func:`serve` blocks the calling greenthread; it won't return until the server completes. If you desire an immediate return, spawn a new greenthread for :func:`serve`. Any uncaught exceptions raised in *handle* are raised as exceptions from :func:`serve`, terminating the server, so be sure to be aware of the exceptions your application can raise. The return value of *handle* is ignored. Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the server -- that's the only way to get the server() function to return rather than raise. 
The value in *concurrency* controls the maximum number of greenthreads that will be open at any time handling requests. When the server hits the concurrency limit, it stops accepting new connections until the existing ones complete. """ pool = greenpool.GreenPool(concurrency) server_gt = greenthread.getcurrent() while True: try: conn, addr = sock.accept() gt = pool.spawn(handle, conn, addr) gt.link(_stop_checker, server_gt, conn) conn, addr, gt = None, None, None except StopServe: return def wrap_ssl(sock, *a, **kw): """Convenience function for converting a regular socket into an SSL socket. Has the same interface as :func:`ssl.wrap_socket`, but can also use PyOpenSSL. Though, note that it ignores the `cert_reqs`, `ssl_version`, `ca_certs`, `do_handshake_on_connect`, and `suppress_ragged_eofs` arguments when using PyOpenSSL. The preferred idiom is to call wrap_ssl directly on the creation method, e.g., ``wrap_ssl(connect(addr))`` or ``wrap_ssl(listen(addr), server_side=True)``. This way there is no "naked" socket sitting around to accidentally corrupt the SSL session. :return Green SSL object. """ return wrap_ssl_impl(sock, *a, **kw) try: from eventlet.green import ssl wrap_ssl_impl = ssl.wrap_socket except ImportError: # trying PyOpenSSL try: from eventlet.green.OpenSSL import SSL except ImportError: def wrap_ssl_impl(*a, **kw): raise ImportError( "To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.7 or later.") else: def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=None, ssl_version=None, ca_certs=None, do_handshake_on_connect=True, suppress_ragged_eofs=True, ciphers=None): # theoretically the ssl_version could be respected in this line context = SSL.Context(SSL.SSLv23_METHOD) if certfile is not None: context.use_certificate_file(certfile) if keyfile is not None: context.use_privatekey_file(keyfile) context.set_verify(SSL.VERIFY_NONE, lambda *x: True) connection = SSL.Connection(context, sock) if server_side: connection.set_accept_state() else: connection.set_connect_state() return connection eventlet-0.30.2/eventlet/corolocal.py0000644000076500000240000000331514006212666020236 0ustar temotostaff00000000000000import weakref from eventlet import greenthread __all__ = ['get_ident', 'local'] def get_ident(): """ Returns ``id()`` of current greenlet. 
Useful for debugging.""" return id(greenthread.getcurrent()) # the entire purpose of this class is to store off the constructor # arguments in a local variable without calling __init__ directly class _localbase(object): __slots__ = '_local__args', '_local__greens' def __new__(cls, *args, **kw): self = object.__new__(cls) object.__setattr__(self, '_local__args', (args, kw)) object.__setattr__(self, '_local__greens', weakref.WeakKeyDictionary()) if (args or kw) and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") return self def _patch(thrl): greens = object.__getattribute__(thrl, '_local__greens') # until we can store the localdict on greenlets themselves, # we store it in _local__greens on the local object cur = greenthread.getcurrent() if cur not in greens: # must be the first time we've seen this greenlet, call __init__ greens[cur] = {} cls = type(thrl) if cls.__init__ is not object.__init__: args, kw = object.__getattribute__(thrl, '_local__args') thrl.__init__(*args, **kw) object.__setattr__(thrl, '__dict__', greens[cur]) class local(_localbase): def __getattribute__(self, attr): _patch(self) return object.__getattribute__(self, attr) def __setattr__(self, attr, value): _patch(self) return object.__setattr__(self, attr, value) def __delattr__(self, attr): _patch(self) return object.__delattr__(self, attr) eventlet-0.30.2/eventlet/coros.py0000644000076500000240000000403514006212666017406 0ustar temotostaff00000000000000from __future__ import print_function from eventlet import event as _event class metaphore(object): """This is sort of an inverse semaphore: a counter that starts at 0 and waits only if nonzero. It's used to implement a "wait for all" scenario. >>> from eventlet import coros, spawn_n >>> count = coros.metaphore() >>> count.wait() >>> def decrementer(count, id): ... print("{0} decrementing".format(id)) ... count.dec() ... >>> _ = spawn_n(decrementer, count, 'A') >>> _ = spawn_n(decrementer, count, 'B') >>> count.inc(2) >>> count.wait() A decrementing B decrementing """ def __init__(self): self.counter = 0 self.event = _event.Event() # send() right away, else we'd wait on the default 0 count! self.event.send() def inc(self, by=1): """Increment our counter. If this transitions the counter from zero to nonzero, make any subsequent :meth:`wait` call wait. """ assert by > 0 self.counter += by if self.counter == by: # If we just incremented self.counter by 'by', and the new count # equals 'by', then the old value of self.counter was 0. # Transitioning from 0 to a nonzero value means wait() must # actually wait. self.event.reset() def dec(self, by=1): """Decrement our counter. If this transitions the counter from nonzero to zero, a current or subsequent wait() call need no longer wait. """ assert by > 0 self.counter -= by if self.counter <= 0: # Don't leave self.counter < 0, that will screw things up in # future calls. self.counter = 0 # Transitioning from nonzero to 0 means wait() need no longer wait. self.event.send() def wait(self): """Suspend the caller only if our count is nonzero. In that case, resume the caller once the count decrements to zero again. 
""" self.event.wait() eventlet-0.30.2/eventlet/dagpool.py0000644000076500000240000006327714006212666017723 0ustar temotostaff00000000000000# @file dagpool.py # @author Nat Goodspeed # @date 2016-08-08 # @brief Provide DAGPool class from eventlet.event import Event from eventlet import greenthread import six import collections # value distinguished from any other Python value including None _MISSING = object() class Collision(Exception): """ DAGPool raises Collision when you try to launch two greenthreads with the same key, or post() a result for a key corresponding to a greenthread, or post() twice for the same key. As with KeyError, str(collision) names the key in question. """ pass class PropagateError(Exception): """ When a DAGPool greenthread terminates with an exception instead of returning a result, attempting to retrieve its value raises PropagateError. Attributes: key the key of the greenthread which raised the exception exc the exception object raised by the greenthread """ def __init__(self, key, exc): # initialize base class with a reasonable string message msg = "PropagateError({0}): {1}: {2}" \ .format(key, exc.__class__.__name__, exc) super(PropagateError, self).__init__(msg) self.msg = msg # Unless we set args, this is unpickleable: # https://bugs.python.org/issue1692335 self.args = (key, exc) self.key = key self.exc = exc def __str__(self): return self.msg class DAGPool(object): """ A DAGPool is a pool that constrains greenthreads, not by max concurrency, but by data dependencies. This is a way to implement general DAG dependencies. A simple dependency tree (flowing in either direction) can straightforwardly be implemented using recursion and (e.g.) :meth:`GreenThread.imap() `. What gets complicated is when a given node depends on several other nodes as well as contributing to several other nodes. With DAGPool, you concurrently launch all applicable greenthreads; each will proceed as soon as it has all required inputs. The DAG is implicit in which items are required by each greenthread. Each greenthread is launched in a DAGPool with a key: any value that can serve as a Python dict key. The caller also specifies an iterable of other keys on which this greenthread depends. This iterable may be empty. The greenthread callable must accept (key, results), where: key is its own key results is an iterable of (key, value) pairs. A newly-launched DAGPool greenthread is entered immediately, and can perform any necessary setup work. At some point it will iterate over the (key, value) pairs from the passed 'results' iterable. Doing so blocks the greenthread until a value is available for each of the keys specified in its initial dependencies iterable. These (key, value) pairs are delivered in chronological order, *not* the order in which they are initially specified: each value will be delivered as soon as it becomes available. The value returned by a DAGPool greenthread becomes the value for its key, which unblocks any other greenthreads waiting on that key. If a DAGPool greenthread terminates with an exception instead of returning a value, attempting to retrieve the value raises :class:`PropagateError`, which binds the key of the original greenthread and the original exception. Unless the greenthread attempting to retrieve the value handles PropagateError, that exception will in turn be wrapped in a PropagateError of its own, and so forth. 
The code that ultimately handles PropagateError can follow the chain of PropagateError.exc attributes to discover the flow of that exception through the DAG of greenthreads. External greenthreads may also interact with a DAGPool. See :meth:`wait_each`, :meth:`waitall`, :meth:`post`. It is not recommended to constrain external DAGPool producer greenthreads in a :class:`GreenPool `: it may be hard to provably avoid deadlock. .. automethod:: __init__ .. automethod:: __getitem__ """ _Coro = collections.namedtuple("_Coro", ("greenthread", "pending")) def __init__(self, preload={}): """ DAGPool can be prepopulated with an initial dict or iterable of (key, value) pairs. These (key, value) pairs are of course immediately available for any greenthread that depends on any of those keys. """ try: # If a dict is passed, copy it. Don't risk a subsequent # modification to passed dict affecting our internal state. iteritems = six.iteritems(preload) except AttributeError: # Not a dict, just an iterable of (key, value) pairs iteritems = preload # Load the initial dict self.values = dict(iteritems) # track greenthreads self.coros = {} # The key to blocking greenthreads is the Event. self.event = Event() def waitall(self): """ waitall() blocks the calling greenthread until there is a value for every DAGPool greenthread launched by :meth:`spawn`. It returns a dict containing all :class:`preload data `, all data from :meth:`post` and all values returned by spawned greenthreads. See also :meth:`wait`. """ # waitall() is an alias for compatibility with GreenPool return self.wait() def wait(self, keys=_MISSING): """ *keys* is an optional iterable of keys. If you omit the argument, it waits for all the keys from :class:`preload data `, from :meth:`post` calls and from :meth:`spawn` calls: in other words, all the keys of which this DAGPool is aware. wait() blocks the calling greenthread until all of the relevant keys have values. wait() returns a dict whose keys are the relevant keys, and whose values come from the *preload* data, from values returned by DAGPool greenthreads or from :meth:`post` calls. If a DAGPool greenthread terminates with an exception, wait() will raise :class:`PropagateError` wrapping that exception. If more than one greenthread terminates with an exception, it is indeterminate which one wait() will raise. If an external greenthread posts a :class:`PropagateError` instance, wait() will raise that PropagateError. If more than one greenthread posts PropagateError, it is indeterminate which one wait() will raise. See also :meth:`wait_each_success`, :meth:`wait_each_exception`. """ # This is mostly redundant with wait_each() functionality. return dict(self.wait_each(keys)) def wait_each(self, keys=_MISSING): """ *keys* is an optional iterable of keys. If you omit the argument, it waits for all the keys from :class:`preload data `, from :meth:`post` calls and from :meth:`spawn` calls: in other words, all the keys of which this DAGPool is aware. wait_each() is a generator producing (key, value) pairs as a value becomes available for each requested key. wait_each() blocks the calling greenthread until the next value becomes available. If the DAGPool was prepopulated with values for any of the relevant keys, of course those can be delivered immediately without waiting. Delivery order is intentionally decoupled from the initial sequence of keys: each value is delivered as soon as it becomes available. 
If multiple keys are available at the same time, wait_each() delivers each of the ready ones in arbitrary order before blocking again. The DAGPool does not distinguish between a value returned by one of its own greenthreads and one provided by a :meth:`post` call or *preload* data. The wait_each() generator terminates (raises StopIteration) when all specified keys have been delivered. Thus, typical usage might be: :: for key, value in dagpool.wait_each(keys): # process this ready key and value # continue processing now that we've gotten values for all keys By implication, if you pass wait_each() an empty iterable of keys, it returns immediately without yielding anything. If the value to be delivered is a :class:`PropagateError` exception object, the generator raises that PropagateError instead of yielding it. See also :meth:`wait_each_success`, :meth:`wait_each_exception`. """ # Build a local set() and then call _wait_each(). return self._wait_each(self._get_keyset_for_wait_each(keys)) def wait_each_success(self, keys=_MISSING): """ wait_each_success() filters results so that only success values are yielded. In other words, unlike :meth:`wait_each`, wait_each_success() will not raise :class:`PropagateError`. Not every provided (or defaulted) key will necessarily be represented, though naturally the generator will not finish until all have completed. In all other respects, wait_each_success() behaves like :meth:`wait_each`. """ for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)): if not isinstance(value, PropagateError): yield key, value def wait_each_exception(self, keys=_MISSING): """ wait_each_exception() filters results so that only exceptions are yielded. Not every provided (or defaulted) key will necessarily be represented, though naturally the generator will not finish until all have completed. Unlike other DAGPool methods, wait_each_exception() simply yields :class:`PropagateError` instances as values rather than raising them. In all other respects, wait_each_exception() behaves like :meth:`wait_each`. """ for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)): if isinstance(value, PropagateError): yield key, value def _get_keyset_for_wait_each(self, keys): """ wait_each(), wait_each_success() and wait_each_exception() promise that if you pass an iterable of keys, the method will wait for results from those keys -- but if you omit the keys argument, the method will wait for results from all known keys. This helper implements that distinction, returning a set() of the relevant keys. """ if keys is not _MISSING: return set(keys) else: # keys arg omitted -- use all the keys we know about return set(six.iterkeys(self.coros)) | set(six.iterkeys(self.values)) def _wait_each(self, pending): """ When _wait_each() encounters a value of PropagateError, it raises it. In all other respects, _wait_each() behaves like _wait_each_raw(). """ for key, value in self._wait_each_raw(pending): yield key, self._value_or_raise(value) @staticmethod def _value_or_raise(value): # Most methods attempting to deliver PropagateError should raise that # instead of simply returning it. if isinstance(value, PropagateError): raise value return value def _wait_each_raw(self, pending): """ pending is a set() of keys for which we intend to wait. THIS SET WILL BE DESTRUCTIVELY MODIFIED: as each key acquires a value, that key will be removed from the passed 'pending' set. 
_wait_each_raw() does not treat a PropagateError instance specially: it will be yielded to the caller like any other value. In all other respects, _wait_each_raw() behaves like wait_each(). """ while True: # Before even waiting, show caller any (key, value) pairs that # are already available. Copy 'pending' because we want to be able # to remove items from the original set while iterating. for key in pending.copy(): value = self.values.get(key, _MISSING) if value is not _MISSING: # found one, it's no longer pending pending.remove(key) yield (key, value) if not pending: # Once we've yielded all the caller's keys, done. break # There are still more keys pending, so wait. self.event.wait() def spawn(self, key, depends, function, *args, **kwds): """ Launch the passed *function(key, results, ...)* as a greenthread, passing it: - the specified *key* - an iterable of (key, value) pairs - whatever other positional args or keywords you specify. Iterating over the *results* iterable behaves like calling :meth:`wait_each(depends) `. Returning from *function()* behaves like :meth:`post(key, return_value) `. If *function()* terminates with an exception, that exception is wrapped in :class:`PropagateError` with the greenthread's *key* and (effectively) posted as the value for that key. Attempting to retrieve that value will raise that PropagateError. Thus, if the greenthread with key 'a' terminates with an exception, and greenthread 'b' depends on 'a', when greenthread 'b' attempts to iterate through its *results* argument, it will encounter PropagateError. So by default, an uncaught exception will propagate through all the downstream dependencies. If you pass :meth:`spawn` a key already passed to spawn() or :meth:`post`, spawn() raises :class:`Collision`. """ if key in self.coros or key in self.values: raise Collision(key) # The order is a bit tricky. First construct the set() of keys. pending = set(depends) # It's important that we pass to _wait_each() the same 'pending' set() # that we store in self.coros for this key. The generator-iterator # returned by _wait_each() becomes the function's 'results' iterable. newcoro = greenthread.spawn(self._wrapper, function, key, self._wait_each(pending), *args, **kwds) # Also capture the same (!) set in the new _Coro object for this key. # We must be able to observe ready keys being removed from the set. self.coros[key] = self._Coro(newcoro, pending) def _wrapper(self, function, key, results, *args, **kwds): """ This wrapper runs the top-level function in a DAGPool greenthread, posting its return value (or PropagateError) to the DAGPool. """ try: # call our passed function result = function(key, results, *args, **kwds) except Exception as err: # Wrap any exception it may raise in a PropagateError. result = PropagateError(key, err) finally: # function() has returned (or terminated with an exception). We no # longer need to track this greenthread in self.coros. Remove it # first so post() won't complain about a running greenthread. del self.coros[key] try: # as advertised, try to post() our return value self.post(key, result) except Collision: # if we've already post()ed a result, oh well pass # also, in case anyone cares... return result def spawn_many(self, depends, function, *args, **kwds): """ spawn_many() accepts a single *function* whose parameters are the same as for :meth:`spawn`. The difference is that spawn_many() accepts a dependency dict *depends*. A new greenthread is spawned for each key in the dict. 
That dict key's value should be an iterable of other keys on which this greenthread depends. If the *depends* dict contains any key already passed to :meth:`spawn` or :meth:`post`, spawn_many() raises :class:`Collision`. It is indeterminate how many of the other keys in *depends* will have successfully spawned greenthreads. """ # Iterate over 'depends' items, relying on self.spawn() not to # context-switch so no one can modify 'depends' along the way. for key, deps in six.iteritems(depends): self.spawn(key, deps, function, *args, **kwds) def kill(self, key): """ Kill the greenthread that was spawned with the specified *key*. If no such greenthread was spawned, raise KeyError. """ # let KeyError, if any, propagate self.coros[key].greenthread.kill() # once killed, remove it del self.coros[key] def post(self, key, value, replace=False): """ post(key, value) stores the passed *value* for the passed *key*. It then causes each greenthread blocked on its results iterable, or on :meth:`wait_each(keys) `, to check for new values. A waiting greenthread might not literally resume on every single post() of a relevant key, but the first post() of a relevant key ensures that it will resume eventually, and when it does it will catch up with all relevant post() calls. Calling post(key, value) when there is a running greenthread with that same *key* raises :class:`Collision`. If you must post(key, value) instead of letting the greenthread run to completion, you must first call :meth:`kill(key) `. The DAGPool implicitly post()s the return value from each of its greenthreads. But a greenthread may explicitly post() a value for its own key, which will cause its return value to be discarded. Calling post(key, value, replace=False) (the default *replace*) when a value for that key has already been posted, by any means, raises :class:`Collision`. Calling post(key, value, replace=True) when a value for that key has already been posted, by any means, replaces the previously-stored value. However, that may make it complicated to reason about the behavior of greenthreads waiting on that key. After a post(key, value1) followed by post(key, value2, replace=True), it is unspecified which pending :meth:`wait_each([key...]) ` calls (or greenthreads iterating over *results* involving that key) will observe *value1* versus *value2*. It is guaranteed that subsequent wait_each([key...]) calls (or greenthreads spawned after that point) will observe *value2*. A successful call to post(key, :class:`PropagateError(key, ExceptionSubclass) `) ensures that any subsequent attempt to retrieve that key's value will raise that PropagateError instance. """ # First, check if we're trying to post() to a key with a running # greenthread. # A DAGPool greenthread is explicitly permitted to post() to its # OWN key. coro = self.coros.get(key, _MISSING) if coro is not _MISSING and coro.greenthread is not greenthread.getcurrent(): # oh oh, trying to post a value for running greenthread from # some other greenthread raise Collision(key) # Here, either we're posting a value for a key with no greenthread or # we're posting from that greenthread itself. # Has somebody already post()ed a value for this key? # Unless replace == True, this is a problem. if key in self.values and not replace: raise Collision(key) # Either we've never before posted a value for this key, or we're # posting with replace == True. 
# update our database self.values[key] = value # and wake up pending waiters self.event.send() # The comment in Event.reset() says: "it's better to create a new # event rather than reset an old one". Okay, fine. We do want to be # able to support new waiters, so create a new Event. self.event = Event() def __getitem__(self, key): """ __getitem__(key) (aka dagpool[key]) blocks until *key* has a value, then delivers that value. """ # This is a degenerate case of wait_each(). Construct a tuple # containing only this 'key'. wait_each() will yield exactly one (key, # value) pair. Return just its value. for _, value in self.wait_each((key,)): return value def get(self, key, default=None): """ get() returns the value for *key*. If *key* does not yet have a value, get() returns *default*. """ return self._value_or_raise(self.values.get(key, default)) def keys(self): """ Return a snapshot tuple of keys for which we currently have values. """ # Explicitly return a copy rather than an iterator: don't assume our # caller will finish iterating before new values are posted. return tuple(six.iterkeys(self.values)) def items(self): """ Return a snapshot tuple of currently-available (key, value) pairs. """ # Don't assume our caller will finish iterating before new values are # posted. return tuple((key, self._value_or_raise(value)) for key, value in six.iteritems(self.values)) def running(self): """ Return number of running DAGPool greenthreads. This includes greenthreads blocked while iterating through their *results* iterable, that is, greenthreads waiting on values from other keys. """ return len(self.coros) def running_keys(self): """ Return keys for running DAGPool greenthreads. This includes greenthreads blocked while iterating through their *results* iterable, that is, greenthreads waiting on values from other keys. """ # return snapshot; don't assume caller will finish iterating before we # next modify self.coros return tuple(six.iterkeys(self.coros)) def waiting(self): """ Return number of waiting DAGPool greenthreads, that is, greenthreads still waiting on values from other keys. This explicitly does *not* include external greenthreads waiting on :meth:`wait`, :meth:`waitall`, :meth:`wait_each`. """ # n.b. if Event would provide a count of its waiters, we could say # something about external greenthreads as well. # The logic to determine this count is exactly the same as the general # waiting_for() call. return len(self.waiting_for()) # Use _MISSING instead of None as the default 'key' param so we can permit # None as a supported key. def waiting_for(self, key=_MISSING): """ waiting_for(key) returns a set() of the keys for which the DAGPool greenthread spawned with that *key* is still waiting. If you pass a *key* for which no greenthread was spawned, waiting_for() raises KeyError. waiting_for() without argument returns a dict. Its keys are the keys of DAGPool greenthreads still waiting on one or more values. In the returned dict, the value of each such key is the set of other keys for which that greenthread is still waiting. This method allows diagnosing a "hung" DAGPool. If certain greenthreads are making no progress, it's possible that they are waiting on keys for which there is no greenthread and no :meth:`post` data. """ # We may have greenthreads whose 'pending' entry indicates they're # waiting on some keys even though values have now been posted for # some or all of those keys, because those greenthreads have not yet # regained control since values were posted. 
So make a point of # excluding values that are now available. available = set(six.iterkeys(self.values)) if key is not _MISSING: # waiting_for(key) is semantically different than waiting_for(). # It's just that they both seem to want the same method name. coro = self.coros.get(key, _MISSING) if coro is _MISSING: # Hmm, no running greenthread with this key. But was there # EVER a greenthread with this key? If not, let KeyError # propagate. self.values[key] # Oh good, there's a value for this key. Either the # greenthread finished, or somebody posted a value. Just say # the greenthread isn't waiting for anything. return set() else: # coro is the _Coro for the running greenthread with the # specified key. return coro.pending - available # This is a waiting_for() call, i.e. a general query rather than for a # specific key. # Start by iterating over (key, coro) pairs in self.coros. Generate # (key, pending) pairs in which 'pending' is the set of keys on which # the greenthread believes it's waiting, minus the set of keys that # are now available. Filter out any pair in which 'pending' is empty, # that is, that greenthread will be unblocked next time it resumes. # Make a dict from those pairs. return dict((key, pending) for key, pending in ((key, (coro.pending - available)) for key, coro in six.iteritems(self.coros)) if pending) eventlet-0.30.2/eventlet/db_pool.py0000644000076500000240000003662314006212666017707 0ustar temotostaff00000000000000from __future__ import print_function from collections import deque from contextlib import contextmanager import sys import time from eventlet.pools import Pool from eventlet import timeout from eventlet import hubs from eventlet.hubs.timer import Timer from eventlet.greenthread import GreenThread _MISSING = object() class ConnectTimeout(Exception): pass def cleanup_rollback(conn): conn.rollback() class BaseConnectionPool(Pool): def __init__(self, db_module, min_size=0, max_size=4, max_idle=10, max_age=30, connect_timeout=5, cleanup=cleanup_rollback, *args, **kwargs): """ Constructs a pool with at least *min_size* connections and at most *max_size* connections. Uses *db_module* to construct new connections. The *max_idle* parameter determines how long pooled connections can remain idle, in seconds. After *max_idle* seconds have elapsed without the connection being used, the pool closes the connection. *max_age* is how long any particular connection is allowed to live. Connections that have been open for longer than *max_age* seconds are closed, regardless of idle time. If *max_age* is 0, all connections are closed on return to the pool, reducing it to a concurrency limiter. *connect_timeout* is the duration in seconds that the pool will wait before timing out on connect() to the database. If triggered, the timeout will raise a ConnectTimeout from get(). The remainder of the arguments are used as parameters to the *db_module*'s connection constructor. """ assert(db_module) self._db_module = db_module self._args = args self._kwargs = kwargs self.max_idle = max_idle self.max_age = max_age self.connect_timeout = connect_timeout self._expiration_timer = None self.cleanup = cleanup super(BaseConnectionPool, self).__init__(min_size=min_size, max_size=max_size, order_as_stack=True) def _schedule_expiration(self): """Sets up a timer that will call _expire_old_connections when the oldest connection currently in the free pool is ready to expire. 
This is the earliest possible time that a connection could expire, thus, the timer will be running as infrequently as possible without missing a possible expiration. If this function is called when a timer is already scheduled, it does nothing. If max_age or max_idle is 0, _schedule_expiration likewise does nothing. """ if self.max_age == 0 or self.max_idle == 0: # expiration is unnecessary because all connections will be expired # on put return if (self._expiration_timer is not None and not getattr(self._expiration_timer, 'called', False)): # the next timer is already scheduled return try: now = time.time() self._expire_old_connections(now) # the last item in the list, because of the stack ordering, # is going to be the most-idle idle_delay = (self.free_items[-1][0] - now) + self.max_idle oldest = min([t[1] for t in self.free_items]) age_delay = (oldest - now) + self.max_age next_delay = min(idle_delay, age_delay) except (IndexError, ValueError): # no free items, unschedule ourselves self._expiration_timer = None return if next_delay > 0: # set up a continuous self-calling loop self._expiration_timer = Timer(next_delay, GreenThread(hubs.get_hub().greenlet).switch, self._schedule_expiration, [], {}) self._expiration_timer.schedule() def _expire_old_connections(self, now): """Iterates through the open connections contained in the pool, closing ones that have remained idle for longer than max_idle seconds, or have been in existence for longer than max_age seconds. *now* is the current time, as returned by time.time(). """ original_count = len(self.free_items) expired = [ conn for last_used, created_at, conn in self.free_items if self._is_expired(now, last_used, created_at)] new_free = [ (last_used, created_at, conn) for last_used, created_at, conn in self.free_items if not self._is_expired(now, last_used, created_at)] self.free_items.clear() self.free_items.extend(new_free) # adjust the current size counter to account for expired # connections self.current_size -= original_count - len(self.free_items) for conn in expired: self._safe_close(conn, quiet=True) def _is_expired(self, now, last_used, created_at): """Returns true and closes the connection if it's expired. """ if (self.max_idle <= 0 or self.max_age <= 0 or now - last_used > self.max_idle or now - created_at > self.max_age): return True return False def _unwrap_connection(self, conn): """If the connection was wrapped by a subclass of BaseConnectionWrapper and is still functional (as determined by the __nonzero__, or __bool__ in python3, method), returns the unwrapped connection. If anything goes wrong with this process, returns None. """ base = None try: if conn: base = conn._base conn._destroy() else: base = None except AttributeError: pass return base def _safe_close(self, conn, quiet=False): """Closes the (already unwrapped) connection, squelching any exceptions. 
""" try: conn.close() except AttributeError: pass # conn is None, or junk except Exception: if not quiet: print("Connection.close raised: %s" % (sys.exc_info()[1])) def get(self): conn = super(BaseConnectionPool, self).get() # None is a flag value that means that put got called with # something it couldn't use if conn is None: try: conn = self.create() except Exception: # unconditionally increase the free pool because # even if there are waiters, doing a full put # would incur a greenlib switch and thus lose the # exception stack self.current_size -= 1 raise # if the call to get() draws from the free pool, it will come # back as a tuple if isinstance(conn, tuple): _last_used, created_at, conn = conn else: created_at = time.time() # wrap the connection so the consumer can call close() safely wrapped = PooledConnectionWrapper(conn, self) # annotating the wrapper so that when it gets put in the pool # again, we'll know how old it is wrapped._db_pool_created_at = created_at return wrapped def put(self, conn, cleanup=_MISSING): created_at = getattr(conn, '_db_pool_created_at', 0) now = time.time() conn = self._unwrap_connection(conn) if self._is_expired(now, now, created_at): self._safe_close(conn, quiet=False) conn = None elif cleanup is not None: if cleanup is _MISSING: cleanup = self.cleanup # by default, call rollback in case the connection is in the middle # of a transaction. However, rollback has performance implications # so optionally do nothing or call something else like ping try: if conn: cleanup(conn) except Exception as e: # we don't care what the exception was, we just know the # connection is dead print("WARNING: cleanup %s raised: %s" % (cleanup, e)) conn = None except: conn = None raise if conn is not None: super(BaseConnectionPool, self).put((now, created_at, conn)) else: # wake up any waiters with a flag value that indicates # they need to manufacture a connection if self.waiting() > 0: super(BaseConnectionPool, self).put(None) else: # no waiters -- just change the size self.current_size -= 1 self._schedule_expiration() @contextmanager def item(self, cleanup=_MISSING): conn = self.get() try: yield conn finally: self.put(conn, cleanup=cleanup) def clear(self): """Close all connections that this pool still holds a reference to, and removes all references to them. """ if self._expiration_timer: self._expiration_timer.cancel() free_items, self.free_items = self.free_items, deque() for item in free_items: # Free items created using min_size>0 are not tuples. conn = item[2] if isinstance(item, tuple) else item self._safe_close(conn, quiet=True) self.current_size -= 1 def __del__(self): self.clear() class TpooledConnectionPool(BaseConnectionPool): """A pool which gives out :class:`~eventlet.tpool.Proxy`-based database connections. """ def create(self): now = time.time() return now, now, self.connect( self._db_module, self.connect_timeout, *self._args, **self._kwargs) @classmethod def connect(cls, db_module, connect_timeout, *args, **kw): t = timeout.Timeout(connect_timeout, ConnectTimeout()) try: from eventlet import tpool conn = tpool.execute(db_module.connect, *args, **kw) return tpool.Proxy(conn, autowrap_names=('cursor',)) finally: t.cancel() class RawConnectionPool(BaseConnectionPool): """A pool which gives out plain database connections. 
""" def create(self): now = time.time() return now, now, self.connect( self._db_module, self.connect_timeout, *self._args, **self._kwargs) @classmethod def connect(cls, db_module, connect_timeout, *args, **kw): t = timeout.Timeout(connect_timeout, ConnectTimeout()) try: return db_module.connect(*args, **kw) finally: t.cancel() # default connection pool is the tpool one ConnectionPool = TpooledConnectionPool class GenericConnectionWrapper(object): def __init__(self, baseconn): self._base = baseconn # Proxy all method calls to self._base # FIXME: remove repetition; options to consider: # * for name in (...): # setattr(class, name, lambda self, *a, **kw: getattr(self._base, name)(*a, **kw)) # * def __getattr__(self, name): if name in (...): return getattr(self._base, name) # * other? def __enter__(self): return self._base.__enter__() def __exit__(self, exc, value, tb): return self._base.__exit__(exc, value, tb) def __repr__(self): return self._base.__repr__() _proxy_funcs = ( 'affected_rows', 'autocommit', 'begin', 'change_user', 'character_set_name', 'close', 'commit', 'cursor', 'dump_debug_info', 'errno', 'error', 'errorhandler', 'insert_id', 'literal', 'ping', 'query', 'rollback', 'select_db', 'server_capabilities', 'set_character_set', 'set_isolation_level', 'set_server_option', 'set_sql_mode', 'show_warnings', 'shutdown', 'sqlstate', 'stat', 'store_result', 'string_literal', 'thread_id', 'use_result', 'warning_count', ) for _proxy_fun in GenericConnectionWrapper._proxy_funcs: # excess wrapper for early binding (closure by value) def _wrapper(_proxy_fun=_proxy_fun): def _proxy_method(self, *args, **kwargs): return getattr(self._base, _proxy_fun)(*args, **kwargs) _proxy_method.func_name = _proxy_fun _proxy_method.__name__ = _proxy_fun _proxy_method.__qualname__ = 'GenericConnectionWrapper.' + _proxy_fun return _proxy_method setattr(GenericConnectionWrapper, _proxy_fun, _wrapper(_proxy_fun)) del GenericConnectionWrapper._proxy_funcs del _proxy_fun del _wrapper class PooledConnectionWrapper(GenericConnectionWrapper): """A connection wrapper where: - the close method returns the connection to the pool instead of closing it directly - ``bool(conn)`` returns a reasonable value - returns itself to the pool if it gets garbage collected """ def __init__(self, baseconn, pool): super(PooledConnectionWrapper, self).__init__(baseconn) self._pool = pool def __nonzero__(self): return (hasattr(self, '_base') and bool(self._base)) __bool__ = __nonzero__ def _destroy(self): self._pool = None try: del self._base except AttributeError: pass def close(self): """Return the connection to the pool, and remove the reference to it so that you can't use it again through this wrapper object. """ if self and self._pool: self._pool.put(self) self._destroy() def __del__(self): return # this causes some issues if __del__ is called in the # main coroutine, so for now this is disabled # self.close() class DatabaseConnector(object): """ This is an object which will maintain a collection of database connection pools on a per-host basis. """ def __init__(self, module, credentials, conn_pool=None, *args, **kwargs): """constructor *module* Database module to use. *credentials* Mapping of hostname to connect arguments (e.g. 
username and password) """ assert(module) self._conn_pool_class = conn_pool if self._conn_pool_class is None: self._conn_pool_class = ConnectionPool self._module = module self._args = args self._kwargs = kwargs # this is a map of hostname to username/password self._credentials = credentials self._databases = {} def credentials_for(self, host): if host in self._credentials: return self._credentials[host] else: return self._credentials.get('default', None) def get(self, host, dbname): """Returns a ConnectionPool to the target host and schema. """ key = (host, dbname) if key not in self._databases: new_kwargs = self._kwargs.copy() new_kwargs['db'] = dbname new_kwargs['host'] = host new_kwargs.update(self.credentials_for(host)) dbpool = self._conn_pool_class( self._module, *self._args, **new_kwargs) self._databases[key] = dbpool return self._databases[key] eventlet-0.30.2/eventlet/debug.py0000644000076500000240000001426614006212666017356 0ustar temotostaff00000000000000"""The debug module contains utilities and functions for better debugging Eventlet-powered applications.""" from __future__ import print_function import os import sys import linecache import re import inspect __all__ = ['spew', 'unspew', 'format_hub_listeners', 'format_hub_timers', 'hub_listener_stacks', 'hub_exceptions', 'tpool_exceptions', 'hub_prevent_multiple_readers', 'hub_timer_stacks', 'hub_blocking_detection'] _token_splitter = re.compile('\W+') class Spew(object): def __init__(self, trace_names=None, show_values=True): self.trace_names = trace_names self.show_values = show_values def __call__(self, frame, event, arg): if event == 'line': lineno = frame.f_lineno if '__file__' in frame.f_globals: filename = frame.f_globals['__file__'] if (filename.endswith('.pyc') or filename.endswith('.pyo')): filename = filename[:-1] name = frame.f_globals['__name__'] line = linecache.getline(filename, lineno) else: name = '[unknown]' try: src = inspect.getsourcelines(frame) line = src[lineno] except IOError: line = 'Unknown code named [%s]. VM instruction #%d' % ( frame.f_code.co_name, frame.f_lasti) if self.trace_names is None or name in self.trace_names: print('%s:%s: %s' % (name, lineno, line.rstrip())) if not self.show_values: return self details = [] tokens = _token_splitter.split(line) for tok in tokens: if tok in frame.f_globals: details.append('%s=%r' % (tok, frame.f_globals[tok])) if tok in frame.f_locals: details.append('%s=%r' % (tok, frame.f_locals[tok])) if details: print("\t%s" % ' '.join(details)) return self def spew(trace_names=None, show_values=False): """Install a trace hook which writes incredibly detailed logs about what code is being executed to stdout. """ sys.settrace(Spew(trace_names, show_values)) def unspew(): """Remove the trace hook installed by spew. """ sys.settrace(None) def format_hub_listeners(): """ Returns a formatted string of the current listeners on the current hub. This can be useful in determining what's going on in the event system, especially when used in conjunction with :func:`hub_listener_stacks`. """ from eventlet import hubs hub = hubs.get_hub() result = ['READERS:'] for l in hub.get_readers(): result.append(repr(l)) result.append('WRITERS:') for l in hub.get_writers(): result.append(repr(l)) return os.linesep.join(result) def format_hub_timers(): """ Returns a formatted string of the current timers on the current hub. This can be useful in determining what's going on in the event system, especially when used in conjunction with :func:`hub_timer_stacks`. 
""" from eventlet import hubs hub = hubs.get_hub() result = ['TIMERS:'] for l in hub.timers: result.append(repr(l)) return os.linesep.join(result) def hub_listener_stacks(state=False): """Toggles whether or not the hub records the stack when clients register listeners on file descriptors. This can be useful when trying to figure out what the hub is up to at any given moment. To inspect the stacks of the current listeners, call :func:`format_hub_listeners` at critical junctures in the application logic. """ from eventlet import hubs hubs.get_hub().set_debug_listeners(state) def hub_timer_stacks(state=False): """Toggles whether or not the hub records the stack when timers are set. To inspect the stacks of the current timers, call :func:`format_hub_timers` at critical junctures in the application logic. """ from eventlet.hubs import timer timer._g_debug = state def hub_prevent_multiple_readers(state=True): """Toggle prevention of multiple greenlets reading from a socket When multiple greenlets read from the same socket it is often hard to predict which greenlet will receive what data. To achieve resource sharing consider using ``eventlet.pools.Pool`` instead. But if you really know what you are doing you can change the state to ``False`` to stop the hub from protecting against this mistake. """ from eventlet.hubs import hub hub.g_prevent_multiple_readers = state def hub_exceptions(state=True): """Toggles whether the hub prints exceptions that are raised from its timers. This can be useful to see how greenthreads are terminating. """ from eventlet import hubs hubs.get_hub().set_timer_exceptions(state) from eventlet import greenpool greenpool.DEBUG = state def tpool_exceptions(state=False): """Toggles whether tpool itself prints exceptions that are raised from functions that are executed in it, in addition to raising them like it normally does.""" from eventlet import tpool tpool.QUIET = not state def hub_blocking_detection(state=False, resolution=1): """Toggles whether Eventlet makes an effort to detect blocking behavior in an application. It does this by telling the kernel to raise a SIGALARM after a short timeout, and clearing the timeout every time the hub greenlet is resumed. Therefore, any code that runs for a long time without yielding to the hub will get interrupted by the blocking detector (don't use it in production!). The *resolution* argument governs how long the SIGALARM timeout waits in seconds. The implementation uses :func:`signal.setitimer` and can be specified as a floating-point value. The shorter the resolution, the greater the chance of false positives. """ from eventlet import hubs assert resolution > 0 hubs.get_hub().debug_blocking = state hubs.get_hub().debug_blocking_resolution = resolution if not state: hubs.get_hub().block_detect_post() eventlet-0.30.2/eventlet/event.py0000644000076500000240000001653614006212666017413 0ustar temotostaff00000000000000from __future__ import print_function from eventlet import hubs from eventlet.support import greenlets as greenlet __all__ = ['Event'] class NOT_USED: def __repr__(self): return 'NOT_USED' NOT_USED = NOT_USED() class Event(object): """An abstraction where an arbitrary number of coroutines can wait for one event from another. Events are similar to a Queue that can only hold one item, but differ in two important ways: 1. calling :meth:`send` never unschedules the current greenthread 2. :meth:`send` can only be called once; create a new event to send again. 
They are good for communicating results between coroutines, and are the basis for how :meth:`GreenThread.wait() ` is implemented. >>> from eventlet import event >>> import eventlet >>> evt = event.Event() >>> def baz(b): ... evt.send(b + 1) ... >>> _ = eventlet.spawn_n(baz, 3) >>> evt.wait() 4 """ _result = None _exc = None def __init__(self): self._waiters = set() self.reset() def __str__(self): params = (self.__class__.__name__, hex(id(self)), self._result, self._exc, len(self._waiters)) return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params def reset(self): # this is kind of a misfeature and doesn't work perfectly well, # it's better to create a new event rather than reset an old one # removing documentation so that we don't get new use cases for it assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.' self._result = NOT_USED self._exc = None def ready(self): """ Return true if the :meth:`wait` call will return immediately. Used to avoid waiting for things that might take a while to time out. For example, you can put a bunch of events into a list, and then visit them all repeatedly, calling :meth:`ready` until one returns ``True``, and then you can :meth:`wait` on that one.""" return self._result is not NOT_USED def has_exception(self): return self._exc is not None def has_result(self): return self._result is not NOT_USED and self._exc is None def poll(self, notready=None): if self.ready(): return self.wait() return notready # QQQ make it return tuple (type, value, tb) instead of raising # because # 1) "poll" does not imply raising # 2) it's better not to screw up caller's sys.exc_info() by default # (e.g. if caller wants to calls the function in except or finally) def poll_exception(self, notready=None): if self.has_exception(): return self.wait() return notready def poll_result(self, notready=None): if self.has_result(): return self.wait() return notready def wait(self, timeout=None): """Wait until another coroutine calls :meth:`send`. Returns the value the other coroutine passed to :meth:`send`. >>> import eventlet >>> evt = eventlet.Event() >>> def wait_on(): ... retval = evt.wait() ... print("waited for {0}".format(retval)) >>> _ = eventlet.spawn(wait_on) >>> evt.send('result') >>> eventlet.sleep(0) waited for result Returns immediately if the event has already occurred. >>> evt.wait() 'result' When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). """ current = greenlet.getcurrent() if self._result is NOT_USED: hub = hubs.get_hub() self._waiters.add(current) timer = None if timeout is not None: timer = hub.schedule_call_local(timeout, self._do_send, None, None, current) try: result = hub.switch() if timer is not None: timer.cancel() return result finally: self._waiters.discard(current) if self._exc is not None: current.throw(*self._exc) return self._result def send(self, result=None, exc=None): """Makes arrangements for the waiters to be woken with the result and then returns immediately to the parent. >>> from eventlet import event >>> import eventlet >>> evt = event.Event() >>> def waiter(): ... print('about to wait') ... result = evt.wait() ... print('waited for {0}'.format(result)) >>> _ = eventlet.spawn(waiter) >>> eventlet.sleep(0) about to wait >>> evt.send('a') >>> eventlet.sleep(0) waited for a It is an error to call :meth:`send` multiple times on the same event. >>> evt.send('whoops') Traceback (most recent call last): ... 
AssertionError: Trying to re-send() an already-triggered event. Use :meth:`reset` between :meth:`send` s to reuse an event object. """ assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.' self._result = result if exc is not None and not isinstance(exc, tuple): exc = (exc, ) self._exc = exc hub = hubs.get_hub() for waiter in self._waiters: hub.schedule_call_global( 0, self._do_send, self._result, self._exc, waiter) def _do_send(self, result, exc, waiter): if waiter in self._waiters: if exc is None: waiter.switch(result) else: waiter.throw(*exc) def send_exception(self, *args): """Same as :meth:`send`, but sends an exception to waiters. The arguments to send_exception are the same as the arguments to ``raise``. If a single exception object is passed in, it will be re-raised when :meth:`wait` is called, generating a new stacktrace. >>> from eventlet import event >>> evt = event.Event() >>> evt.send_exception(RuntimeError()) >>> evt.wait() Traceback (most recent call last): File "", line 1, in File "eventlet/event.py", line 120, in wait current.throw(*self._exc) RuntimeError If it's important to preserve the entire original stack trace, you must pass in the entire :func:`sys.exc_info` tuple. >>> import sys >>> evt = event.Event() >>> try: ... raise RuntimeError() ... except RuntimeError: ... evt.send_exception(*sys.exc_info()) ... >>> evt.wait() Traceback (most recent call last): File "", line 1, in File "eventlet/event.py", line 120, in wait current.throw(*self._exc) File "", line 2, in RuntimeError Note that doing so stores a traceback object directly on the Event object, which may cause reference cycles. See the :func:`sys.exc_info` documentation. """ # the arguments and the same as for greenlet.throw return self.send(None, args) eventlet-0.30.2/eventlet/green/0000755000076500000240000000000014017673044017010 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/green/BaseHTTPServer.py0000644000076500000240000000053214006212666022120 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import socket from eventlet.green import SocketServer import six patcher.inject( 'BaseHTTPServer' if six.PY2 else 'http.server', globals(), ('socket', socket), ('SocketServer', SocketServer), ('socketserver', SocketServer)) del patcher if __name__ == '__main__': test() eventlet-0.30.2/eventlet/green/CGIHTTPServer.py0000644000076500000240000000105014006212666021644 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import BaseHTTPServer from eventlet.green import SimpleHTTPServer from eventlet.green import urllib from eventlet.green import select test = None # bind prior to patcher.inject to silence pyflakes warning below patcher.inject( 'CGIHTTPServer', globals(), ('BaseHTTPServer', BaseHTTPServer), ('SimpleHTTPServer', SimpleHTTPServer), ('urllib', urllib), ('select', select)) del patcher if __name__ == '__main__': test() # pyflakes false alarm here unless test = None above eventlet-0.30.2/eventlet/green/MySQLdb.py0000644000076500000240000000226114006212666020633 0ustar temotostaff00000000000000__MySQLdb = __import__('MySQLdb') __all__ = __MySQLdb.__all__ __patched__ = ["connect", "Connect", 'Connection', 'connections'] from eventlet.patcher import slurp_properties slurp_properties( __MySQLdb, globals(), ignore=__patched__, srckeys=dir(__MySQLdb)) from eventlet import tpool __orig_connections = __import__('MySQLdb.connections').connections def Connection(*args, **kw): conn = tpool.execute(__orig_connections.Connection, 
*args, **kw) return tpool.Proxy(conn, autowrap_names=('cursor',)) connect = Connect = Connection # replicate the MySQLdb.connections module but with a tpooled Connection factory class MySQLdbConnectionsModule(object): pass connections = MySQLdbConnectionsModule() for var in dir(__orig_connections): if not var.startswith('__'): setattr(connections, var, getattr(__orig_connections, var)) connections.Connection = Connection cursors = __import__('MySQLdb.cursors').cursors converters = __import__('MySQLdb.converters').converters # TODO support instantiating cursors.FooCursor objects directly # TODO though this is a low priority, it would be nice if we supported # subclassing eventlet.green.MySQLdb.connections.Connection eventlet-0.30.2/eventlet/green/OpenSSL/0000755000076500000240000000000014017673044020273 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/green/OpenSSL/SSL.py0000644000076500000240000001066514006212666021313 0ustar temotostaff00000000000000from OpenSSL import SSL as orig_SSL from OpenSSL.SSL import * from eventlet.support import get_errno from eventlet import greenio from eventlet.hubs import trampoline import socket class GreenConnection(greenio.GreenSocket): """ Nonblocking wrapper for SSL.Connection objects. """ def __init__(self, ctx, sock=None): if sock is not None: fd = orig_SSL.Connection(ctx, sock) else: # if we're given a Connection object directly, use it; # this is used in the inherited accept() method fd = ctx super(ConnectionType, self).__init__(fd) def do_handshake(self): """ Perform an SSL handshake (usually called after renegotiate or one of set_accept_state or set_accept_state). This can raise the same exceptions as send and recv. """ if self.act_non_blocking: return self.fd.do_handshake() while True: try: return self.fd.do_handshake() except WantReadError: trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) except WantWriteError: trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) def dup(self): raise NotImplementedError("Dup not supported on SSL sockets") def makefile(self, mode='r', bufsize=-1): raise NotImplementedError("Makefile not supported on SSL sockets") def read(self, size): """Works like a blocking call to SSL_read(), whose behavior is described here: http://www.openssl.org/docs/ssl/SSL_read.html""" if self.act_non_blocking: return self.fd.read(size) while True: try: return self.fd.read(size) except WantReadError: trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) except WantWriteError: trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) except SysCallError as e: if get_errno(e) == -1 or get_errno(e) > 0: return '' recv = read def write(self, data): """Works like a blocking call to SSL_write(), whose behavior is described here: http://www.openssl.org/docs/ssl/SSL_write.html""" if not data: return 0 # calling SSL_write() with 0 bytes to be sent is undefined if self.act_non_blocking: return self.fd.write(data) while True: try: return self.fd.write(data) except WantReadError: trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) except WantWriteError: trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) send = write def sendall(self, data): """Send "all" data on the connection. This calls send() repeatedly until all data is sent. 
If an error occurs, it's impossible to tell how much data has been sent. No return value.""" tail = self.send(data) while tail < len(data): tail += self.send(data[tail:]) def shutdown(self): if self.act_non_blocking: return self.fd.shutdown() while True: try: return self.fd.shutdown() except WantReadError: trampoline(self.fd.fileno(), read=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) except WantWriteError: trampoline(self.fd.fileno(), write=True, timeout=self.gettimeout(), timeout_exc=socket.timeout) Connection = ConnectionType = GreenConnection del greenio eventlet-0.30.2/eventlet/green/OpenSSL/__init__.py0000644000076500000240000000036614006212666022406 0ustar temotostaff00000000000000from . import crypto from . import SSL try: # pyopenssl tsafe module was deprecated and removed in v20.0.0 # https://github.com/pyca/pyopenssl/pull/913 from . import tsafe except ImportError: pass from .version import __version__ eventlet-0.30.2/eventlet/green/OpenSSL/crypto.py0000644000076500000240000000003514006212666022160 0ustar temotostaff00000000000000from OpenSSL.crypto import * eventlet-0.30.2/eventlet/green/OpenSSL/tsafe.py0000644000076500000240000000003414006212666021741 0ustar temotostaff00000000000000from OpenSSL.tsafe import * eventlet-0.30.2/eventlet/green/OpenSSL/version.py0000644000076500000240000000006114006212666022324 0ustar temotostaff00000000000000from OpenSSL.version import __version__, __doc__ eventlet-0.30.2/eventlet/green/Queue.py0000644000076500000240000000150114006212666020440 0ustar temotostaff00000000000000from eventlet import queue __all__ = ['Empty', 'Full', 'LifoQueue', 'PriorityQueue', 'Queue'] __patched__ = ['LifoQueue', 'PriorityQueue', 'Queue'] # these classes exist to paper over the major operational difference between # eventlet.queue.Queue and the stdlib equivalents class Queue(queue.Queue): def __init__(self, maxsize=0): if maxsize == 0: maxsize = None super(Queue, self).__init__(maxsize) class PriorityQueue(queue.PriorityQueue): def __init__(self, maxsize=0): if maxsize == 0: maxsize = None super(PriorityQueue, self).__init__(maxsize) class LifoQueue(queue.LifoQueue): def __init__(self, maxsize=0): if maxsize == 0: maxsize = None super(LifoQueue, self).__init__(maxsize) Empty = queue.Empty Full = queue.Full eventlet-0.30.2/eventlet/green/SimpleHTTPServer.py0000644000076500000240000000042514006212666022500 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import BaseHTTPServer from eventlet.green import urllib patcher.inject( 'SimpleHTTPServer', globals(), ('BaseHTTPServer', BaseHTTPServer), ('urllib', urllib)) del patcher if __name__ == '__main__': test() eventlet-0.30.2/eventlet/green/SocketServer.py0000644000076500000240000000055514006212666022003 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import socket from eventlet.green import select from eventlet.green import threading import six patcher.inject( 'SocketServer' if six.PY2 else 'socketserver', globals(), ('socket', socket), ('select', select), ('threading', threading)) # QQQ ForkingMixIn should be fixed to use green waitpid? 
eventlet-0.30.2/eventlet/green/__init__.py0000644000076500000240000000012414006212666021113 0ustar temotostaff00000000000000# this package contains modules from the standard library converted to use eventlet eventlet-0.30.2/eventlet/green/_socket_nodns.py0000644000076500000240000000143314006212666022210 0ustar temotostaff00000000000000__socket = __import__('socket') __all__ = __socket.__all__ __patched__ = ['fromfd', 'socketpair', 'ssl', 'socket', 'timeout'] import eventlet.patcher eventlet.patcher.slurp_properties(__socket, globals(), ignore=__patched__, srckeys=dir(__socket)) os = __import__('os') import sys from eventlet import greenio socket = greenio.GreenSocket _GLOBAL_DEFAULT_TIMEOUT = greenio._GLOBAL_DEFAULT_TIMEOUT timeout = greenio.socket_timeout try: __original_fromfd__ = __socket.fromfd def fromfd(*args): return socket(__original_fromfd__(*args)) except AttributeError: pass try: __original_socketpair__ = __socket.socketpair def socketpair(*args): one, two = __original_socketpair__(*args) return socket(one), socket(two) except AttributeError: pass eventlet-0.30.2/eventlet/green/asynchat.py0000644000076500000240000000032414006212666021170 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import asyncore from eventlet.green import socket patcher.inject( 'asynchat', globals(), ('asyncore', asyncore), ('socket', socket)) del patcher eventlet-0.30.2/eventlet/green/asyncore.py0000644000076500000240000000040214006212666021176 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import select from eventlet.green import socket from eventlet.green import time patcher.inject( "asyncore", globals(), ('select', select), ('socket', socket), ('time', time)) del patcher eventlet-0.30.2/eventlet/green/builtin.py0000644000076500000240000000240714006212666021030 0ustar temotostaff00000000000000""" In order to detect a filehandle that's been closed, our only clue may be the operating system returning the same filehandle in response to some other operation. The builtins 'file' and 'open' are patched to collaborate with the notify_opened protocol. """ builtins_orig = __builtins__ from eventlet import hubs from eventlet.hubs import hub from eventlet.patcher import slurp_properties import sys import six __all__ = dir(builtins_orig) __patched__ = ['open'] if six.PY2: __patched__ += ['file'] slurp_properties(builtins_orig, globals(), ignore=__patched__, srckeys=dir(builtins_orig)) hubs.get_hub() if six.PY2: __original_file = file class file(__original_file): def __init__(self, *args, **kwargs): super(file, self).__init__(*args, **kwargs) hubs.notify_opened(self.fileno()) __original_open = open __opening = False def open(*args, **kwargs): global __opening result = __original_open(*args, **kwargs) if not __opening: # This is incredibly ugly. 'open' is used under the hood by # the import process. So, ensure we don't wind up in an # infinite loop. 
__opening = True hubs.notify_opened(result.fileno()) __opening = False return result eventlet-0.30.2/eventlet/green/ftplib.py0000644000076500000240000000046314006212666020642 0ustar temotostaff00000000000000from eventlet import patcher # *NOTE: there might be some funny business with the "SOCKS" module # if it even still exists from eventlet.green import socket patcher.inject('ftplib', globals(), ('socket', socket)) del patcher # Run test program when run as a script if __name__ == '__main__': test() eventlet-0.30.2/eventlet/green/http/0000755000076500000240000000000014017673044017767 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/green/http/__init__.py0000644000076500000240000002113114006212666022073 0ustar temotostaff00000000000000# This is part of Python source code with Eventlet-specific modifications. # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved # # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 # -------------------------------------------- # # 1. This LICENSE AGREEMENT is between the Python Software Foundation # ("PSF"), and the Individual or Organization ("Licensee") accessing and # otherwise using this software ("Python") in source or binary form and # its associated documentation. # # 2. Subject to the terms and conditions of this License Agreement, PSF hereby # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, # analyze, test, perform and/or display publicly, prepare derivative works, # distribute, and otherwise use Python alone or in any derivative version, # provided, however, that PSF's License Agreement and PSF's notice of copyright, # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved" are retained in Python alone or in any derivative version prepared by # Licensee. # # 3. In the event Licensee prepares a derivative work that is based on # or incorporates Python or any part thereof, and wants to make # the derivative work available to others as provided herein, then # Licensee hereby agrees to include in any such work a brief summary of # the changes made to Python. # # 4. PSF is making Python available to Licensee on an "AS IS" # basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR # IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT # INFRINGE ANY THIRD PARTY RIGHTS. # # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. # # 6. This License Agreement will automatically terminate upon a material # breach of its terms and conditions. # # 7. Nothing in this License Agreement shall be deemed to create any # relationship of agency, partnership, or joint venture between PSF and # Licensee. This License Agreement does not grant permission to use PSF # trademarks or trade name in a trademark sense to endorse or promote # products or services of Licensee, or any third party. # # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. 
import six assert six.PY3, 'This is a Python 3 module' from enum import IntEnum __all__ = ['HTTPStatus'] class HTTPStatus(IntEnum): """HTTP status codes and reason phrases Status codes from the following RFCs are all observed: * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616 * RFC 6585: Additional HTTP Status Codes * RFC 3229: Delta encoding in HTTP * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518 * RFC 5842: Binding Extensions to WebDAV * RFC 7238: Permanent Redirect * RFC 2295: Transparent Content Negotiation in HTTP * RFC 2774: An HTTP Extension Framework """ def __new__(cls, value, phrase, description=''): obj = int.__new__(cls, value) obj._value_ = value obj.phrase = phrase obj.description = description return obj # informational CONTINUE = 100, 'Continue', 'Request received, please continue' SWITCHING_PROTOCOLS = (101, 'Switching Protocols', 'Switching to new protocol; obey Upgrade header') PROCESSING = 102, 'Processing' # success OK = 200, 'OK', 'Request fulfilled, document follows' CREATED = 201, 'Created', 'Document created, URL follows' ACCEPTED = (202, 'Accepted', 'Request accepted, processing continues off-line') NON_AUTHORITATIVE_INFORMATION = (203, 'Non-Authoritative Information', 'Request fulfilled from cache') NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows' RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input' PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows' MULTI_STATUS = 207, 'Multi-Status' ALREADY_REPORTED = 208, 'Already Reported' IM_USED = 226, 'IM Used' # redirection MULTIPLE_CHOICES = (300, 'Multiple Choices', 'Object has several resources -- see URI list') MOVED_PERMANENTLY = (301, 'Moved Permanently', 'Object moved permanently -- see URI list') FOUND = 302, 'Found', 'Object moved temporarily -- see URI list' SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list' NOT_MODIFIED = (304, 'Not Modified', 'Document has not changed since given time') USE_PROXY = (305, 'Use Proxy', 'You must use proxy specified in Location to access this resource') TEMPORARY_REDIRECT = (307, 'Temporary Redirect', 'Object moved temporarily -- see URI list') PERMANENT_REDIRECT = (308, 'Permanent Redirect', 'Object moved temporarily -- see URI list') # client error BAD_REQUEST = (400, 'Bad Request', 'Bad request syntax or unsupported method') UNAUTHORIZED = (401, 'Unauthorized', 'No permission -- see authorization schemes') PAYMENT_REQUIRED = (402, 'Payment Required', 'No payment -- see charging schemes') FORBIDDEN = (403, 'Forbidden', 'Request forbidden -- authorization will not help') NOT_FOUND = (404, 'Not Found', 'Nothing matches the given URI') METHOD_NOT_ALLOWED = (405, 'Method Not Allowed', 'Specified method is invalid for this resource') NOT_ACCEPTABLE = (406, 'Not Acceptable', 'URI not available in preferred format') PROXY_AUTHENTICATION_REQUIRED = (407, 'Proxy Authentication Required', 'You must authenticate with this proxy before proceeding') REQUEST_TIMEOUT = (408, 'Request Timeout', 'Request timed out; try again later') CONFLICT = 409, 'Conflict', 'Request conflict' GONE = (410, 'Gone', 'URI no longer exists and has been permanently removed') LENGTH_REQUIRED = (411, 'Length Required', 'Client must specify Content-Length') PRECONDITION_FAILED = (412, 'Precondition Failed', 'Precondition in headers is false') REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large', 'Entity is too large') REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long', 'URI is too long') 
UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type', 'Entity body in unsupported format') REQUESTED_RANGE_NOT_SATISFIABLE = (416, 'Requested Range Not Satisfiable', 'Cannot satisfy request range') EXPECTATION_FAILED = (417, 'Expectation Failed', 'Expect condition could not be satisfied') UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity' LOCKED = 423, 'Locked' FAILED_DEPENDENCY = 424, 'Failed Dependency' UPGRADE_REQUIRED = 426, 'Upgrade Required' PRECONDITION_REQUIRED = (428, 'Precondition Required', 'The origin server requires the request to be conditional') TOO_MANY_REQUESTS = (429, 'Too Many Requests', 'The user has sent too many requests in ' 'a given amount of time ("rate limiting")') REQUEST_HEADER_FIELDS_TOO_LARGE = (431, 'Request Header Fields Too Large', 'The server is unwilling to process the request because its header ' 'fields are too large') # server errors INTERNAL_SERVER_ERROR = (500, 'Internal Server Error', 'Server got itself in trouble') NOT_IMPLEMENTED = (501, 'Not Implemented', 'Server does not support this operation') BAD_GATEWAY = (502, 'Bad Gateway', 'Invalid responses from another server/proxy') SERVICE_UNAVAILABLE = (503, 'Service Unavailable', 'The server cannot process the request due to a high load') GATEWAY_TIMEOUT = (504, 'Gateway Timeout', 'The gateway server did not receive a timely response') HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported', 'Cannot fulfill request') VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates' INSUFFICIENT_STORAGE = 507, 'Insufficient Storage' LOOP_DETECTED = 508, 'Loop Detected' NOT_EXTENDED = 510, 'Not Extended' NETWORK_AUTHENTICATION_REQUIRED = (511, 'Network Authentication Required', 'The client needs to authenticate to gain network access') eventlet-0.30.2/eventlet/green/http/client.py0000644000076500000240000016255214006212666021627 0ustar temotostaff00000000000000# This is part of Python source code with Eventlet-specific modifications. # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved # # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 # -------------------------------------------- # # 1. This LICENSE AGREEMENT is between the Python Software Foundation # ("PSF"), and the Individual or Organization ("Licensee") accessing and # otherwise using this software ("Python") in source or binary form and # its associated documentation. # # 2. Subject to the terms and conditions of this License Agreement, PSF hereby # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, # analyze, test, perform and/or display publicly, prepare derivative works, # distribute, and otherwise use Python alone or in any derivative version, # provided, however, that PSF's License Agreement and PSF's notice of copyright, # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved" are retained in Python alone or in any derivative version prepared by # Licensee. # # 3. In the event Licensee prepares a derivative work that is based on # or incorporates Python or any part thereof, and wants to make # the derivative work available to others as provided herein, then # Licensee hereby agrees to include in any such work a brief summary of # the changes made to Python. # # 4. PSF is making Python available to Licensee on an "AS IS" # basis. 
PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR # IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT # INFRINGE ANY THIRD PARTY RIGHTS. # # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. # # 6. This License Agreement will automatically terminate upon a material # breach of its terms and conditions. # # 7. Nothing in this License Agreement shall be deemed to create any # relationship of agency, partnership, or joint venture between PSF and # Licensee. This License Agreement does not grant permission to use PSF # trademarks or trade name in a trademark sense to endorse or promote # products or services of Licensee, or any third party. # # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. """HTTP/1.1 client library HTTPConnection goes through a number of "states", which define when a client may legally make another request or fetch the response for a particular request. This diagram details these state transitions: (null) | | HTTPConnection() v Idle | | putrequest() v Request-started | | ( putheader() )* endheaders() v Request-sent |\_____________________________ | | getresponse() raises | response = getresponse() | ConnectionError v v Unread-response Idle [Response-headers-read] |\____________________ | | | response.read() | putrequest() v v Idle Req-started-unread-response ______/| / | response.read() | | ( putheader() )* endheaders() v v Request-started Req-sent-unread-response | | response.read() v Request-sent This diagram presents the following rules: -- a second request may not be started until {response-headers-read} -- a response [object] cannot be retrieved until {request-sent} -- there is no differentiation between an unread response body and a partially read response body Note: this enforcement is applied by the HTTPConnection class. The HTTPResponse class does not enforce this state machine, which implies sophisticated clients may accelerate the request/response pipeline. Caution should be taken, though: accelerating the states beyond the above pattern may imply knowledge of the server's connection-close behavior for certain requests. For example, it is impossible to tell whether the server will close the connection UNTIL the response headers have been read; this means that further requests cannot be placed into the pipeline until it is known that the server will NOT be closing the connection. 
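As a minimal sketch of one request/response cycle that respects these rules
(the host name and request target here are illustrative assumptions; the
methods are those defined on HTTPConnection below):

    from eventlet.green.http.client import HTTPConnection

    conn = HTTPConnection('example.com')    # Idle
    conn.putrequest('GET', '/')             # Request-started
    conn.putheader('Accept', 'text/html')
    conn.endheaders()                       # Request-sent
    resp = conn.getresponse()               # Unread-response
    body = resp.read()                      # back to Idle; next request may start
    conn.close()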
Logical State __state __response ------------- ------- ---------- Idle _CS_IDLE None Request-started _CS_REQ_STARTED None Request-sent _CS_REQ_SENT None Unread-response _CS_IDLE Req-started-unread-response _CS_REQ_STARTED Req-sent-unread-response _CS_REQ_SENT """ import email.parser import email.message import io import re import collections from urllib.parse import urlsplit from eventlet.green import http, os, socket # HTTPMessage, parse_headers(), and the HTTP status code constants are # intentionally omitted for simplicity __all__ = ["HTTPResponse", "HTTPConnection", "HTTPException", "NotConnected", "UnknownProtocol", "UnknownTransferEncoding", "UnimplementedFileMode", "IncompleteRead", "InvalidURL", "ImproperConnectionState", "CannotSendRequest", "CannotSendHeader", "ResponseNotReady", "BadStatusLine", "LineTooLong", "RemoteDisconnected", "error", "responses"] HTTP_PORT = 80 HTTPS_PORT = 443 _UNKNOWN = 'UNKNOWN' # connection states _CS_IDLE = 'Idle' _CS_REQ_STARTED = 'Request-started' _CS_REQ_SENT = 'Request-sent' # hack to maintain backwards compatibility globals().update(http.HTTPStatus.__members__) # another hack to maintain backwards compatibility # Mapping status codes to official W3C names responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()} # maximal amount of data to read at one time in _safe_read MAXAMOUNT = 1048576 # maximal line length when calling readline(). _MAXLINE = 65536 _MAXHEADERS = 100 # Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2) # # VCHAR = %x21-7E # obs-text = %x80-FF # header-field = field-name ":" OWS field-value OWS # field-name = token # field-value = *( field-content / obs-fold ) # field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] # field-vchar = VCHAR / obs-text # # obs-fold = CRLF 1*( SP / HTAB ) # ; obsolete line folding # ; see Section 3.2.4 # token = 1*tchar # # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" # / DIGIT / ALPHA # ; any VCHAR, except delimiters # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 # the patterns for both name and value are more leniant than RFC # definitions to allow for backwards compatibility # Eventlet change: match used instead of fullmatch for Python 3.3 compatibility _is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search # We always set the Content-Length header for these methods because some # servers will otherwise respond with a 411 _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'} def _encode(data, name='data'): """Call data.encode("latin-1") but show a better error message.""" try: return data.encode("latin-1") except UnicodeEncodeError as err: raise UnicodeEncodeError( err.encoding, err.object, err.start, err.end, "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') " "if you want to send it encoded in UTF-8." % (name.title(), data[err.start:err.end], name)) from None class HTTPMessage(email.message.Message): # XXX The only usage of this method is in # http.server.CGIHTTPRequestHandler. Maybe move the code there so # that it doesn't need to be part of the public API. The API has # never been defined so this could cause backwards compatibility # issues. def getallmatchingheaders(self, name): """Find all header lines matching a given header name. Look through the list of headers and find all lines matching a given header name (and their continuation lines). 
A list of the lines is returned, without interpretation. If the header does not occur, an empty list is returned. If the header occurs multiple times, all occurrences are returned. Case is not important in the header name. """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.keys(): if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(line) return lst def parse_headers(fp, _class=HTTPMessage): """Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. """ headers = [] while True: line = fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("header line") headers.append(line) if len(headers) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if line in (b'\r\n', b'\n', b''): break hstring = b''.join(headers).decode('iso-8859-1') return email.parser.Parser(_class=_class).parsestr(hstring) class HTTPResponse(io.BufferedIOBase): # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. # The bytes from the socket object are iso-8859-1 strings. # See RFC 2616 sec 2.2 which notes an exception for MIME-encoded # text following RFC 2047. The basic status line parsing only # accepts iso-8859-1. def __init__(self, sock, debuglevel=0, method=None, url=None): # If the response includes a content-length header, we need to # make sure that the client doesn't read more than the # specified number of bytes. If it does, it will block until # the server times out and closes the connection. This will # happen if a self.fp.read() is done (without a size) whether # self.fp is buffered or not. So, no self.fp.read() by # clients unless they know what they are doing. self.fp = sock.makefile("rb") self.debuglevel = debuglevel self._method = method # The HTTPResponse object is returned via urllib. The clients # of http and urllib expect different attributes for the # headers. headers is used here and supports urllib. msg is # provided as a backwards compatibility layer for http # clients. self.headers = self.msg = None # from the Status-Line of the response self.version = _UNKNOWN # HTTP-Version self.status = _UNKNOWN # Status-Code self.reason = _UNKNOWN # Reason-Phrase self.chunked = _UNKNOWN # is "chunked" being used? self.chunk_left = _UNKNOWN # bytes left to read in current chunk self.length = _UNKNOWN # number of bytes left in response self.will_close = _UNKNOWN # conn will close at end of response def _read_status(self): line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1") if len(line) > _MAXLINE: raise LineTooLong("status line") if self.debuglevel > 0: print("reply:", repr(line)) if not line: # Presumably, the server closed the connection before # sending a valid response. raise RemoteDisconnected("Remote end closed connection without" " response") try: version, status, reason = line.split(None, 2) except ValueError: try: version, status = line.split(None, 1) reason = "" except ValueError: # empty version will cause next test to fail. 
version = "" if not version.startswith("HTTP/"): self._close_conn() raise BadStatusLine(line) # The status code is a three-digit number try: status = int(status) if status < 100 or status > 999: raise BadStatusLine(line) except ValueError: raise BadStatusLine(line) return version, status, reason def begin(self): if self.headers is not None: # we've already started reading the response return # read until we get a non-100 response while True: version, status, reason = self._read_status() if status != CONTINUE: break # skip the header from the 100 response while True: skip = self.fp.readline(_MAXLINE + 1) if len(skip) > _MAXLINE: raise LineTooLong("header line") skip = skip.strip() if not skip: break if self.debuglevel > 0: print("header:", skip) self.code = self.status = status self.reason = reason.strip() if version in ("HTTP/1.0", "HTTP/0.9"): # Some servers might still return "0.9", treat it as 1.0 anyway self.version = 10 elif version.startswith("HTTP/1."): self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 else: raise UnknownProtocol(version) self.headers = self.msg = parse_headers(self.fp) if self.debuglevel > 0: for hdr in self.headers: print("header:", hdr, end=" ") # are we using the chunked-style of transfer encoding? tr_enc = self.headers.get("transfer-encoding") if tr_enc and tr_enc.lower() == "chunked": self.chunked = True self.chunk_left = None else: self.chunked = False # will the connection close at the end of the response? self.will_close = self._check_close() # do we have a Content-Length? # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" self.length = None length = self.headers.get("content-length") # are we using the chunked-style of transfer encoding? tr_enc = self.headers.get("transfer-encoding") if length and not self.chunked: try: self.length = int(length) except ValueError: self.length = None else: if self.length < 0: # ignore nonsensical negative lengths self.length = None else: self.length = None # does the body have a fixed length? (of zero) if (status == NO_CONTENT or status == NOT_MODIFIED or 100 <= status < 200 or # 1xx codes self._method == "HEAD"): self.length = 0 # if the connection remains open, and we aren't using chunked, and # a content-length was not provided, then assume that the connection # WILL close. if (not self.will_close and not self.chunked and self.length is None): self.will_close = True def _check_close(self): conn = self.headers.get("connection") if self.version == 11: # An HTTP/1.1 proxy is assumed to stay open unless # explicitly closed. conn = self.headers.get("connection") if conn and "close" in conn.lower(): return True return False # Some HTTP/1.0 implementations have support for persistent # connections, using rules different than HTTP/1.1. # For older HTTP, Keep-Alive indicates persistent connection. if self.headers.get("keep-alive"): return False # At least Akamai returns a "Connection: Keep-Alive" header, # which was supposed to be sent by the client. if conn and "keep-alive" in conn.lower(): return False # Proxy-Connection is a netscape hack. pconn = self.headers.get("proxy-connection") if pconn and "keep-alive" in pconn.lower(): return False # otherwise, assume it will close return True def _close_conn(self): fp = self.fp self.fp = None fp.close() def close(self): try: super().close() # set "closed" flag finally: if self.fp: self._close_conn() # These implementations are for the benefit of io.BufferedReader. 
# XXX This class should probably be revised to act more like # the "raw stream" that BufferedReader expects. def flush(self): super().flush() if self.fp: self.fp.flush() def readable(self): """Always returns True""" return True # End of "raw stream" methods def isclosed(self): """True if the connection is closed.""" # NOTE: it is possible that we will not ever call self.close(). This # case occurs when will_close is TRUE, length is None, and we # read up to the last byte, but NOT past it. # # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be # called, meaning self.isclosed() is meaningful. return self.fp is None def read(self, amt=None): if self.fp is None: return b"" if self._method == "HEAD": self._close_conn() return b"" if amt is not None: # Amount is given, implement using readinto b = bytearray(amt) n = self.readinto(b) return memoryview(b)[:n].tobytes() else: # Amount is not given (unbounded read) so we must check self.length # and self.chunked if self.chunked: return self._readall_chunked() if self.length is None: s = self.fp.read() else: try: s = self._safe_read(self.length) except IncompleteRead: self._close_conn() raise self.length = 0 self._close_conn() # we read everything return s def readinto(self, b): """Read up to len(b) bytes into bytearray b and return the number of bytes read. """ if self.fp is None: return 0 if self._method == "HEAD": self._close_conn() return 0 if self.chunked: return self._readinto_chunked(b) if self.length is not None: if len(b) > self.length: # clip the read to the "end of response" b = memoryview(b)[0:self.length] # we do not use _safe_read() here because this may be a .will_close # connection, and the user is reading more bytes than will be provided # (for example, reading in 1k chunks) n = self.fp.readinto(b) if not n and b: # Ideally, we would raise IncompleteRead if the content-length # wasn't satisfied, but it might break compatibility. self._close_conn() elif self.length is not None: self.length -= n if not self.length: self._close_conn() return n def _read_next_chunk_size(self): # Read the next chunk size from the file line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("chunk size") i = line.find(b";") if i >= 0: line = line[:i] # strip chunk-extensions try: return int(line, 16) except ValueError: # close the connection as protocol synchronisation is # probably lost self._close_conn() raise def _read_and_discard_trailer(self): # read and discard trailer up to the CRLF terminator ### note: we shouldn't have any trailers! while True: line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("trailer line") if not line: # a vanishingly small number of sites EOF without # sending the trailer break if line in (b'\r\n', b'\n', b''): break def _get_chunk_left(self): # return self.chunk_left, reading a new chunk if necessary. # chunk_left == 0: at the end of the current chunk, need to close it # chunk_left == None: No current chunk, should read next. # This function returns non-zero or None if the last chunk has # been read. chunk_left = self.chunk_left if not chunk_left: # Can be 0 or None if chunk_left is not None: # We are at the end of chunk. 
dicard chunk end self._safe_read(2) # toss the CRLF at the end of the chunk try: chunk_left = self._read_next_chunk_size() except ValueError: raise IncompleteRead(b'') if chunk_left == 0: # last chunk: 1*("0") [ chunk-extension ] CRLF self._read_and_discard_trailer() # we read everything; close the "file" self._close_conn() chunk_left = None self.chunk_left = chunk_left return chunk_left def _readall_chunked(self): assert self.chunked != _UNKNOWN value = [] try: while True: chunk_left = self._get_chunk_left() if chunk_left is None: break value.append(self._safe_read(chunk_left)) self.chunk_left = 0 return b''.join(value) except IncompleteRead: raise IncompleteRead(b''.join(value)) def _readinto_chunked(self, b): assert self.chunked != _UNKNOWN total_bytes = 0 mvb = memoryview(b) try: while True: chunk_left = self._get_chunk_left() if chunk_left is None: return total_bytes if len(mvb) <= chunk_left: n = self._safe_readinto(mvb) self.chunk_left = chunk_left - n return total_bytes + n temp_mvb = mvb[:chunk_left] n = self._safe_readinto(temp_mvb) mvb = mvb[n:] total_bytes += n self.chunk_left = 0 except IncompleteRead: raise IncompleteRead(bytes(b[0:total_bytes])) def _safe_read(self, amt): """Read the number of bytes requested, compensating for partial reads. Normally, we have a blocking socket, but a read() can be interrupted by a signal (resulting in a partial read). Note that we cannot distinguish between EOF and an interrupt when zero bytes have been read. IncompleteRead() will be raised in this situation. This function should be used when bytes "should" be present for reading. If the bytes are truly not available (due to EOF), then the IncompleteRead exception can be used to detect the problem. """ s = [] while amt > 0: chunk = self.fp.read(min(amt, MAXAMOUNT)) if not chunk: raise IncompleteRead(b''.join(s), amt) s.append(chunk) amt -= len(chunk) return b"".join(s) def _safe_readinto(self, b): """Same as _safe_read, but for reading into a buffer.""" total_bytes = 0 mvb = memoryview(b) while total_bytes < len(b): if MAXAMOUNT < len(mvb): temp_mvb = mvb[0:MAXAMOUNT] n = self.fp.readinto(temp_mvb) else: n = self.fp.readinto(mvb) if not n: raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b)) mvb = mvb[n:] total_bytes += n return total_bytes def read1(self, n=-1): """Read with at most one underlying system call. If at least one byte is buffered, return that instead. """ if self.fp is None or self._method == "HEAD": return b"" if self.chunked: return self._read1_chunked(n) if self.length is not None and (n < 0 or n > self.length): n = self.length try: result = self.fp.read1(n) except ValueError: if n >= 0: raise # some implementations, like BufferedReader, don't support -1 # Read an arbitrarily selected largeish chunk. 
result = self.fp.read1(16*1024) if not result and n: self._close_conn() elif self.length is not None: self.length -= len(result) return result def peek(self, n=-1): # Having this enables IOBase.readline() to read more than one # byte at a time if self.fp is None or self._method == "HEAD": return b"" if self.chunked: return self._peek_chunked(n) return self.fp.peek(n) def readline(self, limit=-1): if self.fp is None or self._method == "HEAD": return b"" if self.chunked: # Fallback to IOBase readline which uses peek() and read() return super().readline(limit) if self.length is not None and (limit < 0 or limit > self.length): limit = self.length result = self.fp.readline(limit) if not result and limit: self._close_conn() elif self.length is not None: self.length -= len(result) return result def _read1_chunked(self, n): # Strictly speaking, _get_chunk_left() may cause more than one read, # but that is ok, since that is to satisfy the chunked protocol. chunk_left = self._get_chunk_left() if chunk_left is None or n == 0: return b'' if not (0 <= n <= chunk_left): n = chunk_left # if n is negative or larger than chunk_left read = self.fp.read1(n) self.chunk_left -= len(read) if not read: raise IncompleteRead(b"") return read def _peek_chunked(self, n): # Strictly speaking, _get_chunk_left() may cause more than one read, # but that is ok, since that is to satisfy the chunked protocol. try: chunk_left = self._get_chunk_left() except IncompleteRead: return b'' # peek doesn't worry about protocol if chunk_left is None: return b'' # eof # peek is allowed to return more than requested. Just request the # entire chunk, and truncate what we get. return self.fp.peek(chunk_left)[:chunk_left] def fileno(self): return self.fp.fileno() def getheader(self, name, default=None): '''Returns the value of the header matching *name*. If there are multiple matching headers, the values are combined into a single string separated by commas and spaces. If no matching header is found, returns *default* or None if the *default* is not specified. If the headers are unknown, raises http.client.ResponseNotReady. ''' if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers, str) or not hasattr(headers, '__iter__'): return headers else: return ', '.join(headers) def getheaders(self): """Return list of (header, value) tuples.""" if self.headers is None: raise ResponseNotReady() return list(self.headers.items()) # We override IOBase.__iter__ so that it doesn't check for closed-ness def __iter__(self): return self # For compatibility with old-style urllib responses. def info(self): '''Returns an instance of the class mimetools.Message containing meta-information associated with the URL. When the method is HTTP, these headers are those returned by the server at the head of the retrieved HTML page (including Content-Length and Content-Type). When the method is FTP, a Content-Length header will be present if (as is now usual) the server passed back a file length in response to the FTP retrieval request. A Content-Type header will be present if the MIME type can be guessed. When the method is local-file, returned headers will include a Date representing the file's last-modified time, a Content-Length giving file size, and a Content-Type containing a guess at the file's type. See also the description of the mimetools module. ''' return self.headers def geturl(self): '''Return the real URL of the page. In some cases, the HTTP server redirects a client to another URL. 
The urlopen() function handles this transparently, but in some cases the caller needs to know which URL the client was redirected to. The geturl() method can be used to get at this redirected URL. ''' return self.url def getcode(self): '''Return the HTTP status code that was sent with the response, or None if the URL is not an HTTP URL. ''' return self.status class HTTPConnection: _http_vsn = 11 _http_vsn_str = 'HTTP/1.1' response_class = HTTPResponse default_port = HTTP_PORT auto_open = 1 debuglevel = 0 @staticmethod def _is_textIO(stream): """Test whether a file-like object is a text or a binary stream. """ return isinstance(stream, io.TextIOBase) @staticmethod def _get_content_length(body, method): """Get the content-length based on the body. If the body is None, we set Content-Length: 0 for methods that expect a body (RFC 7230, Section 3.3.2). We also set the Content-Length for any method if the body is a str or bytes-like object and not a file. """ if body is None: # do an explicit check for not None here to distinguish # between unset and set but empty if method.upper() in _METHODS_EXPECTING_BODY: return 0 else: return None if hasattr(body, 'read'): # file-like object. return None try: # does it implement the buffer protocol (bytes, bytearray, array)? mv = memoryview(body) return mv.nbytes except TypeError: pass if isinstance(body, str): return len(body) return None def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): self.timeout = timeout self.source_address = source_address self.sock = None self._buffer = [] self.__response = None self.__state = _CS_IDLE self._method = None self._tunnel_host = None self._tunnel_port = None self._tunnel_headers = {} (self.host, self.port) = self._get_hostport(host, port) # This is stored as an instance variable to allow unit # tests to replace it with a suitable mockup self._create_connection = socket.create_connection def set_tunnel(self, host, port=None, headers=None): """Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTML connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. """ if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear() def _get_hostport(self, host, port): if port is None: i = host.rfind(':') j = host.rfind(']') # ipv6 addresses have [...] 
if i > j: try: port = int(host[i+1:]) except ValueError: if host[i+1:] == "": # http://foo.com:/ == http://foo.com/ port = self.default_port else: raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) host = host[:i] else: port = self.default_port if host and host[0] == '[' and host[-1] == ']': host = host[1:-1] return (host, port) def set_debuglevel(self, level): self.debuglevel = level def _tunnel(self): connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host, self._tunnel_port) connect_bytes = connect_str.encode("ascii") self.send(connect_bytes) for header, value in self._tunnel_headers.items(): header_str = "%s: %s\r\n" % (header, value) header_bytes = header_str.encode("latin-1") self.send(header_bytes) self.send(b'\r\n') response = self.response_class(self.sock, method=self._method) (version, code, message) = response._read_status() if code != http.HTTPStatus.OK: self.close() raise OSError("Tunnel connection failed: %d %s" % (code, message.strip())) while True: line = response.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise LineTooLong("header line") if not line: # for sites which EOF without sending a trailer break if line in (b'\r\n', b'\n', b''): break if self.debuglevel > 0: print('header:', line.decode()) def connect(self): """Connect to the host and port specified in __init__.""" self.sock = self._create_connection( (self.host,self.port), self.timeout, self.source_address) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self._tunnel_host: self._tunnel() def close(self): """Close the connection to the HTTP server.""" self.__state = _CS_IDLE try: sock = self.sock if sock: self.sock = None sock.close() # close it manually... there may be other refs finally: response = self.__response if response: self.__response = None response.close() def send(self, data): """Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. """ if self.sock is None: if self.auto_open: self.connect() else: raise NotConnected() if self.debuglevel > 0: print("send:", repr(data)) blocksize = 8192 if hasattr(data, "read") : if self.debuglevel > 0: print("sendIng a read()able") encode = False try: mode = data.mode except AttributeError: # io.BytesIO and other file-like objects don't have a `mode` # attribute. pass else: if "b" not in mode: encode = True if self.debuglevel > 0: print("encoding file using iso-8859-1") while 1: datablock = data.read(blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") self.sock.sendall(datablock) return try: self.sock.sendall(data) except TypeError: if isinstance(data, collections.Iterable): for d in data: self.sock.sendall(d) else: raise TypeError("data should be a bytes-like object " "or an iterable, got %r" % type(data)) def _output(self, s): """Add a line of output to the current request buffer. Assumes that the line does *not* end with \\r\\n. """ self._buffer.append(s) def _read_readable(self, readable): blocksize = 8192 if self.debuglevel > 0: print("sendIng a read()able") encode = self._is_textIO(readable) if encode and self.debuglevel > 0: print("encoding file using iso-8859-1") while True: datablock = readable.read(blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") yield datablock def _send_output(self, message_body=None, encode_chunked=False): """Send the currently buffered request and clear the buffer. Appends an extra \\r\\n to the buffer. 
A message_body may be specified, to be appended to the request. """ self._buffer.extend((b"", b"")) msg = b"\r\n".join(self._buffer) del self._buffer[:] self.send(msg) if message_body is not None: # create a consistent interface to message_body if hasattr(message_body, 'read'): # Let file-like take precedence over byte-like. This # is needed to allow the current position of mmap'ed # files to be taken into account. chunks = self._read_readable(message_body) else: try: # this is solely to check to see if message_body # implements the buffer API. it /would/ be easier # to capture if PyObject_CheckBuffer was exposed # to Python. memoryview(message_body) except TypeError: try: chunks = iter(message_body) except TypeError: raise TypeError("message_body should be a bytes-like " "object or an iterable, got %r" % type(message_body)) else: # the object implements the buffer interface and # can be passed directly into socket methods chunks = (message_body,) for chunk in chunks: if not chunk: if self.debuglevel > 0: print('Zero length chunk ignored') continue if encode_chunked and self._http_vsn == 11: # chunked encoding chunk = '{0:X}\r\n'.format(len(chunk)).encode('ascii') + chunk + b'\r\n' self.send(chunk) if encode_chunked and self._http_vsn == 11: # end chunked transfer self.send(b'0\r\n\r\n') def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): """Send a request to the server. `method' specifies an HTTP request method, e.g. 'GET'. `url' specifies the object being requested, e.g. '/index.html'. `skip_host' if True does not add automatically a 'Host:' header `skip_accept_encoding' if True does not add automatically an 'Accept-Encoding:' header """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # in certain cases, we cannot issue another request on this connection. # this occurs when: # 1) we are in the process of sending a request. (_CS_REQ_STARTED) # 2) a response to a previous request has signalled that it is going # to close the connection upon completion. # 3) the headers for the previous response have not been read, thus # we cannot determine whether point (2) is true. (_CS_REQ_SENT) # # if there is no prior response, then we can request at will. # # if point (2) is true, then we will have passed the socket to the # response (effectively meaning, "there is no prior response"), and # will open a new one when a new request is made. # # Note: if a prior response exists, then we *can* start a new request. # We are not allowed to begin fetching the response to this new # request, however, until that prior response is complete. # if self.__state == _CS_IDLE: self.__state = _CS_REQ_STARTED else: raise CannotSendRequest(self.__state) # Save the method we use, we need it later in the response phase self._method = method if not url: url = '/' request = '%s %s %s' % (method, url, self._http_vsn_str) # Non-ASCII characters should have been eliminated earlier self._output(request.encode('ascii')) if self._http_vsn == 11: # Issue some standard headers for better HTTP/1.1 compliance if not skip_host: # this header is issued *only* for HTTP/1.1 # connections. more specifically, this means it is # only issued when the client uses the new # HTTPConnection() class. backwards-compat clients # will be using HTTP/1.0 and those clients may be # issuing this header themselves. 
we should NOT issue # it twice; some web servers (such as Apache) barf # when they see two Host: headers # If we need a non-standard port,include it in the # header. If the request is going through a proxy, # but the host of the actual URL, not the host of the # proxy. netloc = '' if url.startswith('http'): nil, netloc, nil, nil, nil = urlsplit(url) if netloc: try: netloc_enc = netloc.encode("ascii") except UnicodeEncodeError: netloc_enc = netloc.encode("idna") self.putheader('Host', netloc_enc) else: if self._tunnel_host: host = self._tunnel_host port = self._tunnel_port else: host = self.host port = self.port try: host_enc = host.encode("ascii") except UnicodeEncodeError: host_enc = host.encode("idna") # As per RFC 273, IPv6 address should be wrapped with [] # when used as Host header if host.find(':') >= 0: host_enc = b'[' + host_enc + b']' if port == self.default_port: self.putheader('Host', host_enc) else: host_enc = host_enc.decode("ascii") self.putheader('Host', "%s:%s" % (host_enc, port)) # note: we are assuming that clients will not attempt to set these # headers since *this* library must deal with the # consequences. this also means that when the supporting # libraries are updated to recognize other forms, then this # code should be changed (removed or updated). # we only want a Content-Encoding of "identity" since we don't # support encodings such as x-gzip or x-deflate. if not skip_accept_encoding: self.putheader('Accept-Encoding', 'identity') # we can accept "chunked" Transfer-Encodings, but no others # NOTE: no TE header implies *only* "chunked" #self.putheader('TE', 'chunked') # if TE is supplied in the header, then it must appear in a # Connection header. #self.putheader('Connection', 'TE') else: # For HTTP/1.0, the server will assume "not chunked" pass def putheader(self, header, *values): """Send a request header line to the server. For example: h.putheader('Accept', 'text/html') """ if self.__state != _CS_REQ_STARTED: raise CannotSendHeader() if hasattr(header, 'encode'): header = header.encode('ascii') if not _is_legal_header_name(header): raise ValueError('Invalid header name %r' % (header,)) values = list(values) for i, one_value in enumerate(values): if hasattr(one_value, 'encode'): values[i] = one_value.encode('latin-1') elif isinstance(one_value, int): values[i] = str(one_value).encode('ascii') if _is_illegal_header_value(values[i]): raise ValueError('Invalid header value %r' % (values[i],)) value = b'\r\n\t'.join(values) header = header + b': ' + value self._output(header) def endheaders(self, message_body=None, **kwds): """Indicate that the last header line has been sent to the server. This method sends the request to the server. The optional message_body argument can be used to pass a message body associated with the request. 
""" encode_chunked = kwds.pop('encode_chunked', False) if kwds: # mimic interpreter error for unrecognized keyword raise TypeError("endheaders() got an unexpected keyword argument '{0}'" .format(kwds.popitem()[0])) if self.__state == _CS_REQ_STARTED: self.__state = _CS_REQ_SENT else: raise CannotSendHeader() self._send_output(message_body, encode_chunked=encode_chunked) def request(self, method, url, body=None, headers={}, **kwds): """Send a complete request to the server.""" encode_chunked = kwds.pop('encode_chunked', False) if kwds: # mimic interpreter error for unrecognized keyword raise TypeError("request() got an unexpected keyword argument '{0}'" .format(kwds.popitem()[0])) self._send_request(method, url, body, headers, encode_chunked) def _set_content_length(self, body, method): # Set the content-length based on the body. If the body is "empty", we # set Content-Length: 0 for methods that expect a body (RFC 7230, # Section 3.3.2). If the body is set for other methods, we set the # header provided we can figure out what the length is. thelen = None method_expects_body = method.upper() in _METHODS_EXPECTING_BODY if body is None and method_expects_body: thelen = '0' elif body is not None: try: thelen = str(len(body)) except TypeError: # If this is a file-like object, try to # fstat its file descriptor try: thelen = str(os.fstat(body.fileno()).st_size) except (AttributeError, OSError): # Don't send a length if this failed if self.debuglevel > 0: print("Cannot stat!!") if thelen is not None: self.putheader('Content-Length', thelen) def _send_request(self, method, url, body, headers, encode_chunked): # Honor explicitly requested Host: and Accept-Encoding: headers. header_names = frozenset(k.lower() for k in headers) skips = {} if 'host' in header_names: skips['skip_host'] = 1 if 'accept-encoding' in header_names: skips['skip_accept_encoding'] = 1 self.putrequest(method, url, **skips) # chunked encoding will happen if HTTP/1.1 is used and either # the caller passes encode_chunked=True or the following # conditions hold: # 1. content-length has not been explicitly set # 2. the body is a file or iterable, but not a str or bytes-like # 3. Transfer-Encoding has NOT been explicitly set by the caller if 'content-length' not in header_names: # only chunk body if not explicitly set for backwards # compatibility, assuming the client code is already handling the # chunking if 'transfer-encoding' not in header_names: # if content-length cannot be automatically determined, fall # back to chunked encoding encode_chunked = False content_length = self._get_content_length(body, method) if content_length is None: if body is not None: if self.debuglevel > 0: print('Unable to determine size of %r' % body) encode_chunked = True self.putheader('Transfer-Encoding', 'chunked') else: self.putheader('Content-Length', str(content_length)) else: encode_chunked = False for hdr, value in headers.items(): self.putheader(hdr, value) if isinstance(body, str): # RFC 2616 Section 3.7.1 says that text default has a # default charset of iso-8859-1. body = _encode(body, 'body') self.endheaders(body, encode_chunked=encode_chunked) def getresponse(self): """Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. 
If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # if a prior response exists, then it must be completed (otherwise, we # cannot read this response's header to determine the connection-close # behavior) # # note: if a prior response existed, but was connection-close, then the # socket and response were made independent of this HTTPConnection # object since a new request requires that we open a whole new # connection # # this means the prior response had one of two states: # 1) will_close: this connection was reset and the prior socket and # response operate independently # 2) persistent: the response was retained and we await its # isclosed() status to become true. # if self.__state != _CS_REQ_SENT or self.__response: raise ResponseNotReady(self.__state) if self.debuglevel > 0: response = self.response_class(self.sock, self.debuglevel, method=self._method) else: response = self.response_class(self.sock, method=self._method) try: try: response.begin() except ConnectionError: self.close() raise assert response.will_close != _UNKNOWN self.__state = _CS_IDLE if response.will_close: # this effectively passes the connection to the response self.close() else: # remember this, so we can tell when it is complete self.__response = response return response except: response.close() raise try: import ssl except ImportError: pass else: class HTTPSConnection(HTTPConnection): "This class allows communication via SSL." default_port = HTTPS_PORT # XXX Should key_file and cert_file be deprecated in favour of context? def __init__(self, host, port=None, key_file=None, cert_file=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, context=None, check_hostname=None): super(HTTPSConnection, self).__init__(host, port, timeout, source_address) self.key_file = key_file self.cert_file = cert_file if context is None: context = ssl._create_default_https_context() will_verify = context.verify_mode != ssl.CERT_NONE if check_hostname is None: check_hostname = context.check_hostname if check_hostname and not will_verify: raise ValueError("check_hostname needs a SSL context with " "either CERT_OPTIONAL or CERT_REQUIRED") if key_file or cert_file: context.load_cert_chain(cert_file, key_file) self._context = context self._check_hostname = check_hostname def connect(self): "Connect to a host on a given (SSL) port." super().connect() if self._tunnel_host: server_hostname = self._tunnel_host else: server_hostname = self.host self.sock = self._context.wrap_socket(self.sock, server_hostname=server_hostname) if not self._context.check_hostname and self._check_hostname: try: ssl.match_hostname(self.sock.getpeercert(), server_hostname) except Exception: self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() raise __all__.append("HTTPSConnection") class HTTPException(Exception): # Subclasses that define an __init__ must call Exception.__init__ # or define self.args. Otherwise, str() will fail. 
pass class NotConnected(HTTPException): pass class InvalidURL(HTTPException): pass class UnknownProtocol(HTTPException): def __init__(self, version): self.args = version, self.version = version class UnknownTransferEncoding(HTTPException): pass class UnimplementedFileMode(HTTPException): pass class IncompleteRead(HTTPException): def __init__(self, partial, expected=None): self.args = partial, self.partial = partial self.expected = expected def __repr__(self): if self.expected is not None: e = ', %i more expected' % self.expected else: e = '' return '%s(%i bytes read%s)' % (self.__class__.__name__, len(self.partial), e) def __str__(self): return repr(self) class ImproperConnectionState(HTTPException): pass class CannotSendRequest(ImproperConnectionState): pass class CannotSendHeader(ImproperConnectionState): pass class ResponseNotReady(ImproperConnectionState): pass class BadStatusLine(HTTPException): def __init__(self, line): if not line: line = repr(line) self.args = line, self.line = line class LineTooLong(HTTPException): def __init__(self, line_type): HTTPException.__init__(self, "got more than %d bytes when reading %s" % (_MAXLINE, line_type)) class RemoteDisconnected(ConnectionResetError, BadStatusLine): def __init__(self, *pos, **kw): BadStatusLine.__init__(self, "") ConnectionResetError.__init__(self, *pos, **kw) # for backwards compatibility error = HTTPException eventlet-0.30.2/eventlet/green/http/cookiejar.py0000644000076500000240000023261014006212666022310 0ustar temotostaff00000000000000# This is part of Python source code with Eventlet-specific modifications. # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved # # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 # -------------------------------------------- # # 1. This LICENSE AGREEMENT is between the Python Software Foundation # ("PSF"), and the Individual or Organization ("Licensee") accessing and # otherwise using this software ("Python") in source or binary form and # its associated documentation. # # 2. Subject to the terms and conditions of this License Agreement, PSF hereby # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, # analyze, test, perform and/or display publicly, prepare derivative works, # distribute, and otherwise use Python alone or in any derivative version, # provided, however, that PSF's License Agreement and PSF's notice of copyright, # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved" are retained in Python alone or in any derivative version prepared by # Licensee. # # 3. In the event Licensee prepares a derivative work that is based on # or incorporates Python or any part thereof, and wants to make # the derivative work available to others as provided herein, then # Licensee hereby agrees to include in any such work a brief summary of # the changes made to Python. # # 4. PSF is making Python available to Licensee on an "AS IS" # basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR # IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT # INFRINGE ANY THIRD PARTY RIGHTS. # # 5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. # # 6. This License Agreement will automatically terminate upon a material # breach of its terms and conditions. # # 7. Nothing in this License Agreement shall be deemed to create any # relationship of agency, partnership, or joint venture between PSF and # Licensee. This License Agreement does not grant permission to use PSF # trademarks or trade name in a trademark sense to endorse or promote # products or services of Licensee, or any third party. # # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. r"""HTTP cookie handling for web clients. This module has (now fairly distant) origins in Gisle Aas' Perl module HTTP::Cookies, from the libwww-perl library. Docstrings, comments and debug strings in this code refer to the attributes of the HTTP cookie system as cookie-attributes, to distinguish them clearly from Python attributes. Class diagram (note that BSDDBCookieJar and the MSIE* classes are not distributed with the Python standard library, but are available from http://wwwsearch.sf.net/): CookieJar____ / \ \ FileCookieJar \ \ / | \ \ \ MozillaCookieJar | LWPCookieJar \ \ | | \ | ---MSIEBase | \ | / | | \ | / MSIEDBCookieJar BSDDBCookieJar |/ MSIECookieJar """ __all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy', 'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar'] import copy import datetime import re import time # Eventlet change: urllib.request used to be imported here but it's not used, # removed for clarity import urllib.parse from calendar import timegm from eventlet.green import threading as _threading, time from eventlet.green.http import client as http_client # only for the default HTTP port debug = False # set to True to enable debugging via the logging module logger = None def _debug(*args): if not debug: return global logger if not logger: import logging logger = logging.getLogger("http.cookiejar") return logger.debug(*args) DEFAULT_HTTP_PORT = str(http_client.HTTP_PORT) MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar " "instance initialised with one)") def _warn_unhandled_exception(): # There are a few catch-all except: statements in this module, for # catching input that's bad in unexpected ways. Warn if any # exceptions are caught there. import io, warnings, traceback f = io.StringIO() traceback.print_exc(None, f) msg = f.getvalue() warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2) # Date/time conversion # ----------------------------------------------------------------------------- EPOCH_YEAR = 1970 def _timegm(tt): year, month, mday, hour, min, sec = tt[:6] if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)): return timegm(tt) else: return None DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] MONTHS_LOWER = [] for month in MONTHS: MONTHS_LOWER.append(month.lower()) def time2isoz(t=None): """Return a string representing time in seconds since epoch, t. If the function is called without an argument, it will use the current time. 
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ", representing Universal Time (UTC, aka GMT). An example of this format is: 1994-11-24 08:49:37Z """ if t is None: dt = datetime.datetime.utcnow() else: dt = datetime.datetime.utcfromtimestamp(t) return "%04d-%02d-%02d %02d:%02d:%02dZ" % ( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) def time2netscape(t=None): """Return a string representing time in seconds since epoch, t. If the function is called without an argument, it will use the current time. The format of the returned string is like this: Wed, DD-Mon-YYYY HH:MM:SS GMT """ if t is None: dt = datetime.datetime.utcnow() else: dt = datetime.datetime.utcfromtimestamp(t) return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1], dt.year, dt.hour, dt.minute, dt.second) UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None} TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII) def offset_from_tz_string(tz): offset = None if tz in UTC_ZONES: offset = 0 else: m = TIMEZONE_RE.search(tz) if m: offset = 3600 * int(m.group(2)) if m.group(3): offset = offset + 60 * int(m.group(3)) if m.group(1) == '-': offset = -offset return offset def _str2time(day, mon, yr, hr, min, sec, tz): yr = int(yr) if yr > datetime.MAXYEAR: return None # translate month name to number # month numbers start with 1 (January) try: mon = MONTHS_LOWER.index(mon.lower())+1 except ValueError: # maybe it's already a number try: imon = int(mon) except ValueError: return None if 1 <= imon <= 12: mon = imon else: return None # make sure clock elements are defined if hr is None: hr = 0 if min is None: min = 0 if sec is None: sec = 0 day = int(day) hr = int(hr) min = int(min) sec = int(sec) if yr < 1000: # find "obvious" year cur_yr = time.localtime(time.time())[0] m = cur_yr % 100 tmp = yr yr = yr + cur_yr - m m = m - tmp if abs(m) > 50: if m > 0: yr = yr + 100 else: yr = yr - 100 # convert UTC time tuple to seconds since epoch (not timezone-adjusted) t = _timegm((yr, mon, day, hr, min, sec, tz)) if t is not None: # adjust time using timezone string, to get absolute time since epoch if tz is None: tz = "UTC" tz = tz.upper() offset = offset_from_tz_string(tz) if offset is None: return None t = t - offset return t STRICT_DATE_RE = re.compile( r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) " "(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII) WEEKDAY_RE = re.compile( r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII) LOOSE_HTTP_DATE_RE = re.compile( r"""^ (\d\d?) # day (?:\s+|[-\/]) (\w+) # month (?:\s+|[-\/]) (\d+) # year (?: (?:\s+|:) # separator before clock (\d\d?):(\d\d) # hour:min (?::(\d\d))? # optional seconds )? # optional clock \s* ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone \s* (?:\(\w+\))? # ASCII representation of timezone in parens. \s*$""", re.X | re.ASCII) def http2time(text): """Returns time in seconds since epoch of time represented by a string. Return value is an integer. None is returned if the format of str is unrecognized, the time is outside the representable range, or the timezone string is not recognized. If the string contains no timezone, UTC is assumed. The timezone in the string may be numerical (like "-0800" or "+0100") or a string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the timezone strings equivalent to UTC (zero offset) are known to the function. 
The function loosely parses the following formats: Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday) 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday) 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday) The parser ignores leading and trailing whitespace. The time may be absent. If the year is given with only 2 digits, the function will select the century that makes the year closest to the current date. """ # fast exit for strictly conforming string m = STRICT_DATE_RE.search(text) if m: g = m.groups() mon = MONTHS_LOWER.index(g[1].lower()) + 1 tt = (int(g[2]), mon, int(g[0]), int(g[3]), int(g[4]), float(g[5])) return _timegm(tt) # No, we need some messy parsing... # clean up text = text.lstrip() text = WEEKDAY_RE.sub("", text, 1) # Useless weekday # tz is time zone specifier string day, mon, yr, hr, min, sec, tz = [None]*7 # loose regexp parse m = LOOSE_HTTP_DATE_RE.search(text) if m is not None: day, mon, yr, hr, min, sec, tz = m.groups() else: return None # bad format return _str2time(day, mon, yr, hr, min, sec, tz) ISO_DATE_RE = re.compile( """^ (\d{4}) # year [-\/]? (\d\d?) # numerical month [-\/]? (\d\d?) # day (?: (?:\s+|[-:Tt]) # separator before clock (\d\d?):?(\d\d) # hour:min (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional) )? # optional clock \s* ([-+]?\d\d?:?(:?\d\d)? |Z|z)? # timezone (Z is "zero meridian", i.e. GMT) \s*$""", re.X | re. ASCII) def iso2time(text): """ As for http2time, but parses the ISO 8601 formats: 1994-02-03 14:15:29 -0100 -- ISO 8601 format 1994-02-03 14:15:29 -- zone is optional 1994-02-03 -- only date 1994-02-03T14:15:29 -- Use T as separator 19940203T141529Z -- ISO 8601 compact format 19940203 -- only date """ # clean up text = text.lstrip() # tz is time zone specifier string day, mon, yr, hr, min, sec, tz = [None]*7 # loose regexp parse m = ISO_DATE_RE.search(text) if m is not None: # XXX there's an extra bit of the timezone I'm ignoring here: is # this the right thing to do? yr, mon, day, hr, min, sec, tz, _ = m.groups() else: return None # bad format return _str2time(day, mon, yr, hr, min, sec, tz) # Header parsing # ----------------------------------------------------------------------------- def unmatched(match): """Return unmatched part of re.Match object.""" start, end = match.span(0) return match.string[:start]+match.string[end:] HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)") HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"") HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)") HEADER_ESCAPE_RE = re.compile(r"\\(.)") def split_header_words(header_values): r"""Parse header values into a list of lists containing key,value pairs. The function knows how to deal with ",", ";" and "=" as well as quoted values after "=". A list of space separated tokens are parsed as if they were separated by ";". If the header_values passed as argument contains multiple values, then they are treated as if they were a single value separated by comma ",". This means that this function is useful for parsing header fields that follow this syntax (BNF as from the HTTP/1.1 specification, but we relax the requirement for tokens). headers = #header header = (token | parameter) *( [";"] (token | parameter)) token = 1* separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) qdtext = > quoted-pair = "\" CHAR parameter = attribute "=" value attribute = token value = token | quoted-string Each header is represented by a list of key/value pairs. The value for a simple token (not part of a parameter) is None. Syntactically incorrect headers will not necessarily be parsed as you would want. This is easier to describe with some examples: >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz']) [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]] >>> split_header_words(['text/html; charset="iso-8859-1"']) [[('text/html', None), ('charset', 'iso-8859-1')]] >>> split_header_words([r'Basic realm="\"foo\bar\""']) [[('Basic', None), ('realm', '"foobar"')]] """ assert not isinstance(header_values, str) result = [] for text in header_values: orig_text = text pairs = [] while text: m = HEADER_TOKEN_RE.search(text) if m: text = unmatched(m) name = m.group(1) m = HEADER_QUOTED_VALUE_RE.search(text) if m: # quoted value text = unmatched(m) value = m.group(1) value = HEADER_ESCAPE_RE.sub(r"\1", value) else: m = HEADER_VALUE_RE.search(text) if m: # unquoted value text = unmatched(m) value = m.group(1) value = value.rstrip() else: # no value, a lone token value = None pairs.append((name, value)) elif text.lstrip().startswith(","): # concatenated headers, as per RFC 2616 section 4.2 text = text.lstrip()[1:] if pairs: result.append(pairs) pairs = [] else: # skip junk non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) assert nr_junk_chars > 0, ( "split_header_words bug: '%s', '%s', %s" % (orig_text, text, pairs)) text = non_junk if pairs: result.append(pairs) return result HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])") def join_header_words(lists): """Do the inverse (almost) of the conversion done by split_header_words. Takes a list of lists of (key, value) pairs and produces a single header value. Attribute values are quoted if needed. >>> join_header_words([[("text/plain", None), ("charset", "iso-8859-1")]]) 'text/plain; charset="iso-8859-1"' >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859-1")]]) 'text/plain, charset="iso-8859-1"' """ headers = [] for pairs in lists: attr = [] for k, v in pairs: if v is not None: if not re.search(r"^\w+$", v): v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \ v = '"%s"' % v k = "%s=%s" % (k, v) attr.append(k) if attr: headers.append("; ".join(attr)) return ", ".join(headers) def strip_quotes(text): if text.startswith('"'): text = text[1:] if text.endswith('"'): text = text[:-1] return text def parse_ns_headers(ns_headers): """Ad-hoc parser for Netscape protocol cookie-attributes. The old Netscape cookie format for Set-Cookie can for instance contain an unquoted "," in the expires field, so we have to use this ad-hoc parser instead of split_header_words. XXX This may not make the best possible effort to parse all the crap that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient parser is probably better, so could do worse than following that if this ever gives any trouble. Currently, this is also used for parsing RFC 2109 cookies. 
""" known_attrs = ("expires", "domain", "path", "secure", # RFC 2109 attrs (may turn up in Netscape cookies, too) "version", "port", "max-age") result = [] for ns_header in ns_headers: pairs = [] version_set = False # XXX: The following does not strictly adhere to RFCs in that empty # names and values are legal (the former will only appear once and will # be overwritten if multiple occurrences are present). This is # mostly to deal with backwards compatibility. for ii, param in enumerate(ns_header.split(';')): param = param.strip() key, sep, val = param.partition('=') key = key.strip() if not key: if ii == 0: break else: continue # allow for a distinction between present and empty and missing # altogether val = val.strip() if sep else None if ii != 0: lc = key.lower() if lc in known_attrs: key = lc if key == "version": # This is an RFC 2109 cookie. if val is not None: val = strip_quotes(val) version_set = True elif key == "expires": # convert expires date to seconds since epoch if val is not None: val = http2time(strip_quotes(val)) # None if invalid pairs.append((key, val)) if pairs: if not version_set: pairs.append(("version", "0")) result.append(pairs) return result IPV4_RE = re.compile(r"\.\d+$", re.ASCII) def is_HDN(text): """Return True if text is a host domain name.""" # XXX # This may well be wrong. Which RFC is HDN defined in, if any (for # the purposes of RFC 2965)? # For the current implementation, what about IPv6? Remember to look # at other uses of IPV4_RE also, if change this. if IPV4_RE.search(text): return False if text == "": return False if text[0] == "." or text[-1] == ".": return False return True def domain_match(A, B): """Return True if domain A domain-matches domain B, according to RFC 2965. A and B may be host domain names or IP addresses. RFC 2965, section 1: Host names can be specified either as an IP address or a HDN string. Sometimes we compare one host name with another. (Such comparisons SHALL be case-insensitive.) Host A's name domain-matches host B's if * their host name strings string-compare equal; or * A is a HDN string and has the form NB, where N is a non-empty name string, B has the form .B', and B' is a HDN string. (So, x.y.com domain-matches .Y.com but not Y.com.) Note that domain-match is not a commutative operation: a.b.c.com domain-matches .c.com, but not the reverse. """ # Note that, if A or B are IP addresses, the only relevant part of the # definition of the domain-match algorithm is the direct string-compare. A = A.lower() B = B.lower() if A == B: return True if not is_HDN(A): return False i = A.rfind(B) if i == -1 or i == 0: # A does not have form NB, or N is the empty string return False if not B.startswith("."): return False if not is_HDN(B[1:]): return False return True def liberal_is_HDN(text): """Return True if text is a sort-of-like a host domain name. For accepting/blocking domains. """ if IPV4_RE.search(text): return False return True def user_domain_match(A, B): """For blocking/accepting domains. A and B may be host domain names or IP addresses. """ A = A.lower() B = B.lower() if not (liberal_is_HDN(A) and liberal_is_HDN(B)): if A == B: # equal IP addresses return True return False initial_dot = B.startswith(".") if initial_dot and A.endswith(B): return True if not initial_dot and A == B: return True return False cut_port_re = re.compile(r":\d+$", re.ASCII) def request_host(request): """Return request-host, as defined by RFC 2965. Variation from RFC: returned value is lowercased, for convenient comparison. 
""" url = request.get_full_url() host = urllib.parse.urlparse(url)[1] if host == "": host = request.get_header("Host", "") # remove port, if present host = cut_port_re.sub("", host, 1) return host.lower() def eff_request_host(request): """Return a tuple (request-host, effective request-host name). As defined by RFC 2965, except both are lowercased. """ erhn = req_host = request_host(request) if req_host.find(".") == -1 and not IPV4_RE.search(req_host): erhn = req_host + ".local" return req_host, erhn def request_path(request): """Path component of request-URI, as defined by RFC 2965.""" url = request.get_full_url() parts = urllib.parse.urlsplit(url) path = escape_path(parts.path) if not path.startswith("/"): # fix bad RFC 2396 absoluteURI path = "/" + path return path def request_port(request): host = request.host i = host.find(':') if i >= 0: port = host[i+1:] try: int(port) except ValueError: _debug("nonnumeric port: '%s'", port) return None else: port = DEFAULT_HTTP_PORT return port # Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't # need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738). HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()" ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])") def uppercase_escaped_char(match): return "%%%s" % match.group(1).upper() def escape_path(path): """Escape any invalid characters in HTTP URL, and uppercase all escapes.""" # There's no knowing what character encoding was used to create URLs # containing %-escapes, but since we have to pick one to escape invalid # path characters, we pick UTF-8, as recommended in the HTML 4.0 # specification: # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 # And here, kind of: draft-fielding-uri-rfc2396bis-03 # (And in draft IRI specification: draft-duerst-iri-05) # (And here, for new URI schemes: RFC 2718) path = urllib.parse.quote(path, HTTP_PATH_SAFE) path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) return path def reach(h): """Return reach of host h, as defined by RFC 2965, section 1. The reach R of a host name H is defined as follows: * If - H is the host domain name of a host; and, - H has the form A.B; and - A has no embedded (that is, interior) dots; and - B has at least one embedded dot, or B is the string "local". then the reach of H is .B. * Otherwise, the reach of H is H. >>> reach("www.acme.com") '.acme.com' >>> reach("acme.com") 'acme.com' >>> reach("acme.local") '.local' """ i = h.find(".") if i >= 0: #a = h[:i] # this line is only here to show what a is b = h[i+1:] i = b.find(".") if is_HDN(h) and (i >= 0 or b == "local"): return "."+b return h def is_third_party(request): """ RFC 2965, section 3.3.6: An unverifiable transaction is to a third-party host if its request- host U does not domain-match the reach R of the request-host O in the origin transaction. """ req_host = request_host(request) if not domain_match(req_host, reach(request.origin_req_host)): return True else: return False class Cookie: """HTTP Cookie. This class represents both Netscape and RFC 2965 cookies. This is deliberately a very simple class. It just holds attributes. It's possible to construct Cookie instances that don't comply with the cookie standards. CookieJar.make_cookies is the factory function for Cookie objects -- it deals with cookie parsing, supplying defaults, and normalising to the representation used in this class. CookiePolicy is responsible for checking them to see whether they should be accepted from and returned to the server. 
Note that the port may be present in the headers, but unspecified ("Port" rather than"Port=80", for example); if this is the case, port is None. """ def __init__(self, version, name, value, port, port_specified, domain, domain_specified, domain_initial_dot, path, path_specified, secure, expires, discard, comment, comment_url, rest, rfc2109=False, ): if version is not None: version = int(version) if expires is not None: expires = int(float(expires)) if port is None and port_specified is True: raise ValueError("if port is None, port_specified must be false") self.version = version self.name = name self.value = value self.port = port self.port_specified = port_specified # normalise case, as per RFC 2965 section 3.3.3 self.domain = domain.lower() self.domain_specified = domain_specified # Sigh. We need to know whether the domain given in the # cookie-attribute had an initial dot, in order to follow RFC 2965 # (as clarified in draft errata). Needed for the returned $Domain # value. self.domain_initial_dot = domain_initial_dot self.path = path self.path_specified = path_specified self.secure = secure self.expires = expires self.discard = discard self.comment = comment self.comment_url = comment_url self.rfc2109 = rfc2109 self._rest = copy.copy(rest) def has_nonstandard_attr(self, name): return name in self._rest def get_nonstandard_attr(self, name, default=None): return self._rest.get(name, default) def set_nonstandard_attr(self, name, value): self._rest[name] = value def is_expired(self, now=None): if now is None: now = time.time() if (self.expires is not None) and (self.expires <= now): return True return False def __str__(self): if self.port is None: p = "" else: p = ":"+self.port limit = self.domain + p + self.path if self.value is not None: namevalue = "%s=%s" % (self.name, self.value) else: namevalue = self.name return "" % (namevalue, limit) def __repr__(self): args = [] for name in ("version", "name", "value", "port", "port_specified", "domain", "domain_specified", "domain_initial_dot", "path", "path_specified", "secure", "expires", "discard", "comment", "comment_url", ): attr = getattr(self, name) args.append("%s=%s" % (name, repr(attr))) args.append("rest=%s" % repr(self._rest)) args.append("rfc2109=%s" % repr(self.rfc2109)) return "%s(%s)" % (self.__class__.__name__, ", ".join(args)) class CookiePolicy: """Defines which cookies get accepted from and returned to server. May also modify cookies, though this is probably a bad idea. The subclass DefaultCookiePolicy defines the standard rules for Netscape and RFC 2965 cookies -- override that if you want a customised policy. """ def set_ok(self, cookie, request): """Return true if (and only if) cookie should be accepted from server. Currently, pre-expired cookies never get this far -- the CookieJar class deletes such cookies itself. """ raise NotImplementedError() def return_ok(self, cookie, request): """Return true if (and only if) cookie should be returned to server.""" raise NotImplementedError() def domain_return_ok(self, domain, request): """Return false if cookies should not be returned, given cookie domain. """ return True def path_return_ok(self, path, request): """Return false if cookies should not be returned, given cookie path. 
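        A subclass can use this hook to veto cookies by path.  An illustrative
        sketch (the class name and path are made up):

          class NoAdminCookiePolicy(DefaultCookiePolicy):
              def path_return_ok(self, path, request):
                  # never send back cookies scoped under /admin
                  if path.startswith("/admin"):
                      return False
                  return DefaultCookiePolicy.path_return_ok(self, path, request)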
""" return True class DefaultCookiePolicy(CookiePolicy): """Implements the standard rules for accepting and returning cookies.""" DomainStrictNoDots = 1 DomainStrictNonDomain = 2 DomainRFC2965Match = 4 DomainLiberal = 0 DomainStrict = DomainStrictNoDots|DomainStrictNonDomain def __init__(self, blocked_domains=None, allowed_domains=None, netscape=True, rfc2965=False, rfc2109_as_netscape=None, hide_cookie2=False, strict_domain=False, strict_rfc2965_unverifiable=True, strict_ns_unverifiable=False, strict_ns_domain=DomainLiberal, strict_ns_set_initial_dollar=False, strict_ns_set_path=False, ): """Constructor arguments should be passed as keyword arguments only.""" self.netscape = netscape self.rfc2965 = rfc2965 self.rfc2109_as_netscape = rfc2109_as_netscape self.hide_cookie2 = hide_cookie2 self.strict_domain = strict_domain self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable self.strict_ns_unverifiable = strict_ns_unverifiable self.strict_ns_domain = strict_ns_domain self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar self.strict_ns_set_path = strict_ns_set_path if blocked_domains is not None: self._blocked_domains = tuple(blocked_domains) else: self._blocked_domains = () if allowed_domains is not None: allowed_domains = tuple(allowed_domains) self._allowed_domains = allowed_domains def blocked_domains(self): """Return the sequence of blocked domains (as a tuple).""" return self._blocked_domains def set_blocked_domains(self, blocked_domains): """Set the sequence of blocked domains.""" self._blocked_domains = tuple(blocked_domains) def is_blocked(self, domain): for blocked_domain in self._blocked_domains: if user_domain_match(domain, blocked_domain): return True return False def allowed_domains(self): """Return None, or the sequence of allowed domains (as a tuple).""" return self._allowed_domains def set_allowed_domains(self, allowed_domains): """Set the sequence of allowed domains, or None.""" if allowed_domains is not None: allowed_domains = tuple(allowed_domains) self._allowed_domains = allowed_domains def is_not_allowed(self, domain): if self._allowed_domains is None: return False for allowed_domain in self._allowed_domains: if user_domain_match(domain, allowed_domain): return False return True def set_ok(self, cookie, request): """ If you override .set_ok(), be sure to call this method. If it returns false, so should your subclass (assuming your subclass wants to be more strict about which cookies to accept). """ _debug(" - checking cookie %s=%s", cookie.name, cookie.value) assert cookie.name is not None for n in "version", "verifiability", "name", "path", "domain", "port": fn_name = "set_ok_"+n fn = getattr(self, fn_name) if not fn(cookie, request): return False return True def set_ok_version(self, cookie, request): if cookie.version is None: # Version is always set to 0 by parse_ns_headers if it's a Netscape # cookie, so this must be an invalid RFC 2965 cookie. 
_debug(" Set-Cookie2 without version attribute (%s=%s)", cookie.name, cookie.value) return False if cookie.version > 0 and not self.rfc2965: _debug(" RFC 2965 cookies are switched off") return False elif cookie.version == 0 and not self.netscape: _debug(" Netscape cookies are switched off") return False return True def set_ok_verifiability(self, cookie, request): if request.unverifiable and is_third_party(request): if cookie.version > 0 and self.strict_rfc2965_unverifiable: _debug(" third-party RFC 2965 cookie during " "unverifiable transaction") return False elif cookie.version == 0 and self.strict_ns_unverifiable: _debug(" third-party Netscape cookie during " "unverifiable transaction") return False return True def set_ok_name(self, cookie, request): # Try and stop servers setting V0 cookies designed to hack other # servers that know both V0 and V1 protocols. if (cookie.version == 0 and self.strict_ns_set_initial_dollar and cookie.name.startswith("$")): _debug(" illegal name (starts with '$'): '%s'", cookie.name) return False return True def set_ok_path(self, cookie, request): if cookie.path_specified: req_path = request_path(request) if ((cookie.version > 0 or (cookie.version == 0 and self.strict_ns_set_path)) and not req_path.startswith(cookie.path)): _debug(" path attribute %s is not a prefix of request " "path %s", cookie.path, req_path) return False return True def set_ok_domain(self, cookie, request): if self.is_blocked(cookie.domain): _debug(" domain %s is in user block-list", cookie.domain) return False if self.is_not_allowed(cookie.domain): _debug(" domain %s is not in user allow-list", cookie.domain) return False if cookie.domain_specified: req_host, erhn = eff_request_host(request) domain = cookie.domain if self.strict_domain and (domain.count(".") >= 2): # XXX This should probably be compared with the Konqueror # (kcookiejar.cpp) and Mozilla implementations, but it's a # losing battle. 
i = domain.rfind(".") j = domain.rfind(".", 0, i) if j == 0: # domain like .foo.bar tld = domain[i+1:] sld = domain[j+1:i] if sld.lower() in ("co", "ac", "com", "edu", "org", "net", "gov", "mil", "int", "aero", "biz", "cat", "coop", "info", "jobs", "mobi", "museum", "name", "pro", "travel", "eu") and len(tld) == 2: # domain like .co.uk _debug(" country-code second level domain %s", domain) return False if domain.startswith("."): undotted_domain = domain[1:] else: undotted_domain = domain embedded_dots = (undotted_domain.find(".") >= 0) if not embedded_dots and domain != ".local": _debug(" non-local domain %s contains no embedded dot", domain) return False if cookie.version == 0: if (not erhn.endswith(domain) and (not erhn.startswith(".") and not ("."+erhn).endswith(domain))): _debug(" effective request-host %s (even with added " "initial dot) does not end with %s", erhn, domain) return False if (cookie.version > 0 or (self.strict_ns_domain & self.DomainRFC2965Match)): if not domain_match(erhn, domain): _debug(" effective request-host %s does not domain-match " "%s", erhn, domain) return False if (cookie.version > 0 or (self.strict_ns_domain & self.DomainStrictNoDots)): host_prefix = req_host[:-len(domain)] if (host_prefix.find(".") >= 0 and not IPV4_RE.search(req_host)): _debug(" host prefix %s for domain %s contains a dot", host_prefix, domain) return False return True def set_ok_port(self, cookie, request): if cookie.port_specified: req_port = request_port(request) if req_port is None: req_port = "80" else: req_port = str(req_port) for p in cookie.port.split(","): try: int(p) except ValueError: _debug(" bad port %s (not numeric)", p) return False if p == req_port: break else: _debug(" request port (%s) not found in %s", req_port, cookie.port) return False return True def return_ok(self, cookie, request): """ If you override .return_ok(), be sure to call this method. If it returns false, so should your subclass (assuming your subclass wants to be more strict about which cookies to return). """ # Path has already been checked by .path_return_ok(), and domain # blocking done by .domain_return_ok(). 
_debug(" - checking cookie %s=%s", cookie.name, cookie.value) for n in "version", "verifiability", "secure", "expires", "port", "domain": fn_name = "return_ok_"+n fn = getattr(self, fn_name) if not fn(cookie, request): return False return True def return_ok_version(self, cookie, request): if cookie.version > 0 and not self.rfc2965: _debug(" RFC 2965 cookies are switched off") return False elif cookie.version == 0 and not self.netscape: _debug(" Netscape cookies are switched off") return False return True def return_ok_verifiability(self, cookie, request): if request.unverifiable and is_third_party(request): if cookie.version > 0 and self.strict_rfc2965_unverifiable: _debug(" third-party RFC 2965 cookie during unverifiable " "transaction") return False elif cookie.version == 0 and self.strict_ns_unverifiable: _debug(" third-party Netscape cookie during unverifiable " "transaction") return False return True def return_ok_secure(self, cookie, request): if cookie.secure and request.type != "https": _debug(" secure cookie with non-secure request") return False return True def return_ok_expires(self, cookie, request): if cookie.is_expired(self._now): _debug(" cookie expired") return False return True def return_ok_port(self, cookie, request): if cookie.port: req_port = request_port(request) if req_port is None: req_port = "80" for p in cookie.port.split(","): if p == req_port: break else: _debug(" request port %s does not match cookie port %s", req_port, cookie.port) return False return True def return_ok_domain(self, cookie, request): req_host, erhn = eff_request_host(request) domain = cookie.domain # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't if (cookie.version == 0 and (self.strict_ns_domain & self.DomainStrictNonDomain) and not cookie.domain_specified and domain != erhn): _debug(" cookie with unspecified domain does not string-compare " "equal to request domain") return False if cookie.version > 0 and not domain_match(erhn, domain): _debug(" effective request-host name %s does not domain-match " "RFC 2965 cookie domain %s", erhn, domain) return False if cookie.version == 0 and not ("."+erhn).endswith(domain): _debug(" request-host %s does not match Netscape cookie domain " "%s", req_host, domain) return False return True def domain_return_ok(self, domain, request): # Liberal check of. This is here as an optimization to avoid # having to load lots of MSIE cookie files unless necessary. 
req_host, erhn = eff_request_host(request) if not req_host.startswith("."): req_host = "."+req_host if not erhn.startswith("."): erhn = "."+erhn if not (req_host.endswith(domain) or erhn.endswith(domain)): #_debug(" request domain %s does not match cookie domain %s", # req_host, domain) return False if self.is_blocked(domain): _debug(" domain %s is in user block-list", domain) return False if self.is_not_allowed(domain): _debug(" domain %s is not in user allow-list", domain) return False return True def path_return_ok(self, path, request): _debug("- checking cookie path=%s", path) req_path = request_path(request) if not req_path.startswith(path): _debug(" %s does not path-match %s", req_path, path) return False return True def vals_sorted_by_key(adict): keys = sorted(adict.keys()) return map(adict.get, keys) def deepvalues(mapping): """Iterates over nested mapping, depth-first, in sorted order by key.""" values = vals_sorted_by_key(mapping) for obj in values: mapping = False try: obj.items except AttributeError: pass else: mapping = True yield from deepvalues(obj) if not mapping: yield obj # Used as second parameter to dict.get() method, to distinguish absent # dict key from one with a None value. class Absent: pass class CookieJar: """Collection of HTTP cookies. You may not need to know about this class: try urllib.request.build_opener(HTTPCookieProcessor).open(url). """ non_word_re = re.compile(r"\W") quote_re = re.compile(r"([\"\\])") strict_domain_re = re.compile(r"\.?[^.]*") domain_re = re.compile(r"[^.]*") dots_re = re.compile(r"^\.+") magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII) def __init__(self, policy=None): if policy is None: policy = DefaultCookiePolicy() self._policy = policy self._cookies_lock = _threading.RLock() self._cookies = {} def set_policy(self, policy): self._policy = policy def _cookies_for_domain(self, domain, request): cookies = [] if not self._policy.domain_return_ok(domain, request): return [] _debug("Checking %s for cookies to return", domain) cookies_by_path = self._cookies[domain] for path in cookies_by_path.keys(): if not self._policy.path_return_ok(path, request): continue cookies_by_name = cookies_by_path[path] for cookie in cookies_by_name.values(): if not self._policy.return_ok(cookie, request): _debug(" not returning cookie") continue _debug(" it's a match") cookies.append(cookie) return cookies def _cookies_for_request(self, request): """Return a list of cookies to be returned to server.""" cookies = [] for domain in self._cookies.keys(): cookies.extend(self._cookies_for_domain(domain, request)) return cookies def _cookie_attrs(self, cookies): """Return a list of cookie-attributes to be returned to server. like ['foo="bar"; $Path="/"', ...] The $Version attribute is also added when appropriate (currently only once per request). """ # add cookies in order of most specific (ie. longest) path first cookies.sort(key=lambda a: len(a.path), reverse=True) version_set = False attrs = [] for cookie in cookies: # set version of Cookie header # XXX # What should it be if multiple matching Set-Cookie headers have # different versions themselves? # Answer: there is no answer; was supposed to be settled by # RFC 2965 errata, but that may never appear... 
version = cookie.version if not version_set: version_set = True if version > 0: attrs.append("$Version=%s" % version) # quote cookie value if necessary # (not for Netscape protocol, which already has any quotes # intact, due to the poorly-specified Netscape Cookie: syntax) if ((cookie.value is not None) and self.non_word_re.search(cookie.value) and version > 0): value = self.quote_re.sub(r"\\\1", cookie.value) else: value = cookie.value # add cookie-attributes to be returned in Cookie header if cookie.value is None: attrs.append(cookie.name) else: attrs.append("%s=%s" % (cookie.name, value)) if version > 0: if cookie.path_specified: attrs.append('$Path="%s"' % cookie.path) if cookie.domain.startswith("."): domain = cookie.domain if (not cookie.domain_initial_dot and domain.startswith(".")): domain = domain[1:] attrs.append('$Domain="%s"' % domain) if cookie.port is not None: p = "$Port" if cookie.port_specified: p = p + ('="%s"' % cookie.port) attrs.append(p) return attrs def add_cookie_header(self, request): """Add correct Cookie: header to request (urllib.request.Request object). The Cookie2 header is also added unless policy.hide_cookie2 is true. """ _debug("add_cookie_header") self._cookies_lock.acquire() try: self._policy._now = self._now = int(time.time()) cookies = self._cookies_for_request(request) attrs = self._cookie_attrs(cookies) if attrs: if not request.has_header("Cookie"): request.add_unredirected_header( "Cookie", "; ".join(attrs)) # if necessary, advertise that we know RFC 2965 if (self._policy.rfc2965 and not self._policy.hide_cookie2 and not request.has_header("Cookie2")): for cookie in cookies: if cookie.version != 1: request.add_unredirected_header("Cookie2", '$Version="1"') break finally: self._cookies_lock.release() self.clear_expired_cookies() def _normalized_cookie_tuples(self, attrs_set): """Return list of tuples containing normalised cookie information. attrs_set is the list of lists of key,value pairs extracted from the Set-Cookie or Set-Cookie2 headers. Tuples are name, value, standard, rest, where name and value are the cookie name and value, standard is a dictionary containing the standard cookie-attributes (discard, secure, version, expires or max-age, domain, path and port) and rest is a dictionary containing the rest of the cookie-attributes. """ cookie_tuples = [] boolean_attrs = "discard", "secure" value_attrs = ("version", "expires", "max-age", "domain", "path", "port", "comment", "commenturl") for cookie_attrs in attrs_set: name, value = cookie_attrs[0] # Build dictionary of standard cookie-attributes (standard) and # dictionary of other cookie-attributes (rest). # Note: expiry time is normalised to seconds since epoch. V0 # cookies should have the Expires cookie-attribute, and V1 cookies # should have Max-Age, but since V1 includes RFC 2109 cookies (and # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we # accept either (but prefer Max-Age). 
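            # (Illustrative example; attribute values are made up) an
            # attrs_set entry such as
            #   [("sessionid", "abc123"), ("Path", "/"), ("Max-Age", "3600")]
            # is normalised below to the tuple
            #   ("sessionid", "abc123", {"path": "/", "expires": self._now + 3600}, {})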
max_age_set = False bad_cookie = False standard = {} rest = {} for k, v in cookie_attrs[1:]: lc = k.lower() # don't lose case distinction for unknown fields if lc in value_attrs or lc in boolean_attrs: k = lc if k in boolean_attrs and v is None: # boolean cookie-attribute is present, but has no value # (like "discard", rather than "port=80") v = True if k in standard: # only first value is significant continue if k == "domain": if v is None: _debug(" missing value for domain attribute") bad_cookie = True break # RFC 2965 section 3.3.3 v = v.lower() if k == "expires": if max_age_set: # Prefer max-age to expires (like Mozilla) continue if v is None: _debug(" missing or invalid value for expires " "attribute: treating as session cookie") continue if k == "max-age": max_age_set = True try: v = int(v) except ValueError: _debug(" missing or invalid (non-numeric) value for " "max-age attribute") bad_cookie = True break # convert RFC 2965 Max-Age to seconds since epoch # XXX Strictly you're supposed to follow RFC 2616 # age-calculation rules. Remember that zero Max-Age # is a request to discard (old and new) cookie, though. k = "expires" v = self._now + v if (k in value_attrs) or (k in boolean_attrs): if (v is None and k not in ("port", "comment", "commenturl")): _debug(" missing value for %s attribute" % k) bad_cookie = True break standard[k] = v else: rest[k] = v if bad_cookie: continue cookie_tuples.append((name, value, standard, rest)) return cookie_tuples def _cookie_from_cookie_tuple(self, tup, request): # standard is dict of standard cookie-attributes, rest is dict of the # rest of them name, value, standard, rest = tup domain = standard.get("domain", Absent) path = standard.get("path", Absent) port = standard.get("port", Absent) expires = standard.get("expires", Absent) # set the easy defaults version = standard.get("version", None) if version is not None: try: version = int(version) except ValueError: return None # invalid version, ignore cookie secure = standard.get("secure", False) # (discard is also set if expires is Absent) discard = standard.get("discard", False) comment = standard.get("comment", None) comment_url = standard.get("commenturl", None) # set default path if path is not Absent and path != "": path_specified = True path = escape_path(path) else: path_specified = False path = request_path(request) i = path.rfind("/") if i != -1: if version == 0: # Netscape spec parts company from reality here path = path[:i] else: path = path[:i+1] if len(path) == 0: path = "/" # set default domain domain_specified = domain is not Absent # but first we have to remember whether it starts with a dot domain_initial_dot = False if domain_specified: domain_initial_dot = bool(domain.startswith(".")) if domain is Absent: req_host, erhn = eff_request_host(request) domain = erhn elif not domain.startswith("."): domain = "."+domain # set default port port_specified = False if port is not Absent: if port is None: # Port attr present, but has no value: default to request port. # Cookie should then only be sent back on that port. port = request_port(request) else: port_specified = True port = re.sub(r"\s+", "", port) else: # No port attr present. Cookie can be sent back on any port. port = None # set default expires and discard if expires is Absent: expires = None discard = True elif expires <= self._now: # Expiry date in past is request to delete cookie. This can't be # in DefaultCookiePolicy, because can't delete cookies there. 
try: self.clear(domain, path, name) except KeyError: pass _debug("Expiring cookie, domain='%s', path='%s', name='%s'", domain, path, name) return None return Cookie(version, name, value, port, port_specified, domain, domain_specified, domain_initial_dot, path, path_specified, secure, expires, discard, comment, comment_url, rest) def _cookies_from_attrs_set(self, attrs_set, request): cookie_tuples = self._normalized_cookie_tuples(attrs_set) cookies = [] for tup in cookie_tuples: cookie = self._cookie_from_cookie_tuple(tup, request) if cookie: cookies.append(cookie) return cookies def _process_rfc2109_cookies(self, cookies): rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None) if rfc2109_as_ns is None: rfc2109_as_ns = not self._policy.rfc2965 for cookie in cookies: if cookie.version == 1: cookie.rfc2109 = True if rfc2109_as_ns: # treat 2109 cookies as Netscape cookies rather than # as RFC2965 cookies cookie.version = 0 def make_cookies(self, response, request): """Return sequence of Cookie objects extracted from response object.""" # get cookie-attributes for RFC 2965 and Netscape protocols headers = response.info() rfc2965_hdrs = headers.get_all("Set-Cookie2", []) ns_hdrs = headers.get_all("Set-Cookie", []) rfc2965 = self._policy.rfc2965 netscape = self._policy.netscape if ((not rfc2965_hdrs and not ns_hdrs) or (not ns_hdrs and not rfc2965) or (not rfc2965_hdrs and not netscape) or (not netscape and not rfc2965)): return [] # no relevant cookie headers: quick exit try: cookies = self._cookies_from_attrs_set( split_header_words(rfc2965_hdrs), request) except Exception: _warn_unhandled_exception() cookies = [] if ns_hdrs and netscape: try: # RFC 2109 and Netscape cookies ns_cookies = self._cookies_from_attrs_set( parse_ns_headers(ns_hdrs), request) except Exception: _warn_unhandled_exception() ns_cookies = [] self._process_rfc2109_cookies(ns_cookies) # Look for Netscape cookies (from Set-Cookie headers) that match # corresponding RFC 2965 cookies (from Set-Cookie2 headers). # For each match, keep the RFC 2965 cookie and ignore the Netscape # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are # bundled in with the Netscape cookies for this purpose, which is # reasonable behaviour. 
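        # (Illustrative example) if a response carried both
        #   Set-Cookie2: sid=1; Version=1
        #   Set-Cookie:  sid=2
        # and both resolve to the same (domain, path, name), only the
        # RFC 2965 cookie ("sid=1") survives the filtering below.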
if rfc2965: lookup = {} for cookie in cookies: lookup[(cookie.domain, cookie.path, cookie.name)] = None def no_matching_rfc2965(ns_cookie, lookup=lookup): key = ns_cookie.domain, ns_cookie.path, ns_cookie.name return key not in lookup ns_cookies = filter(no_matching_rfc2965, ns_cookies) if ns_cookies: cookies.extend(ns_cookies) return cookies def set_cookie_if_ok(self, cookie, request): """Set a cookie if policy says it's OK to do so.""" self._cookies_lock.acquire() try: self._policy._now = self._now = int(time.time()) if self._policy.set_ok(cookie, request): self.set_cookie(cookie) finally: self._cookies_lock.release() def set_cookie(self, cookie): """Set a cookie, without checking whether or not it should be set.""" c = self._cookies self._cookies_lock.acquire() try: if cookie.domain not in c: c[cookie.domain] = {} c2 = c[cookie.domain] if cookie.path not in c2: c2[cookie.path] = {} c3 = c2[cookie.path] c3[cookie.name] = cookie finally: self._cookies_lock.release() def extract_cookies(self, response, request): """Extract cookies from response, where allowable given the request.""" _debug("extract_cookies: %s", response.info()) self._cookies_lock.acquire() try: self._policy._now = self._now = int(time.time()) for cookie in self.make_cookies(response, request): if self._policy.set_ok(cookie, request): _debug(" setting cookie: %s", cookie) self.set_cookie(cookie) finally: self._cookies_lock.release() def clear(self, domain=None, path=None, name=None): """Clear some cookies. Invoking this method without arguments will clear all cookies. If given a single argument, only cookies belonging to that domain will be removed. If given two arguments, cookies belonging to the specified path within that domain are removed. If given three arguments, then the cookie with the specified name, path and domain is removed. Raises KeyError if no matching cookie exists. """ if name is not None: if (domain is None) or (path is None): raise ValueError( "domain and path must be given to remove a cookie by name") del self._cookies[domain][path][name] elif path is not None: if domain is None: raise ValueError( "domain must be given to remove cookies by path") del self._cookies[domain][path] elif domain is not None: del self._cookies[domain] else: self._cookies = {} def clear_session_cookies(self): """Discard all session cookies. Note that the .save() method won't save session cookies anyway, unless you ask otherwise by passing a true ignore_discard argument. """ self._cookies_lock.acquire() try: for cookie in self: if cookie.discard: self.clear(cookie.domain, cookie.path, cookie.name) finally: self._cookies_lock.release() def clear_expired_cookies(self): """Discard all expired cookies. You probably don't need to call this method: expired cookies are never sent back to the server (provided you're using DefaultCookiePolicy), this method is called by CookieJar itself every so often, and the .save() method won't save expired cookies anyway (unless you ask otherwise by passing a true ignore_expires argument). 
""" self._cookies_lock.acquire() try: now = time.time() for cookie in self: if cookie.is_expired(now): self.clear(cookie.domain, cookie.path, cookie.name) finally: self._cookies_lock.release() def __iter__(self): return deepvalues(self._cookies) def __len__(self): """Return number of contained cookies.""" i = 0 for cookie in self: i = i + 1 return i def __repr__(self): r = [] for cookie in self: r.append(repr(cookie)) return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r)) def __str__(self): r = [] for cookie in self: r.append(str(cookie)) return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r)) # derives from OSError for backwards-compatibility with Python 2.4.0 class LoadError(OSError): pass class FileCookieJar(CookieJar): """CookieJar that can be loaded from and saved to a file.""" def __init__(self, filename=None, delayload=False, policy=None): """ Cookies are NOT loaded from the named file until either the .load() or .revert() method is called. """ CookieJar.__init__(self, policy) if filename is not None: try: filename+"" except: raise ValueError("filename must be string-like") self.filename = filename self.delayload = bool(delayload) def save(self, filename=None, ignore_discard=False, ignore_expires=False): """Save cookies to a file.""" raise NotImplementedError() def load(self, filename=None, ignore_discard=False, ignore_expires=False): """Load cookies from a file.""" if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) with open(filename) as f: self._really_load(f, filename, ignore_discard, ignore_expires) def revert(self, filename=None, ignore_discard=False, ignore_expires=False): """Clear all cookies and reload cookies from a saved file. Raises LoadError (or OSError) if reversion is not successful; the object's state will not be altered if this happens. """ if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) self._cookies_lock.acquire() try: old_state = copy.deepcopy(self._cookies) self._cookies = {} try: self.load(filename, ignore_discard, ignore_expires) except OSError: self._cookies = old_state raise finally: self._cookies_lock.release() def lwp_cookie_str(cookie): """Return string representation of Cookie in the LWP cookie file format. Actually, the format is extended a bit -- see module docstring. """ h = [(cookie.name, cookie.value), ("path", cookie.path), ("domain", cookie.domain)] if cookie.port is not None: h.append(("port", cookie.port)) if cookie.path_specified: h.append(("path_spec", None)) if cookie.port_specified: h.append(("port_spec", None)) if cookie.domain_initial_dot: h.append(("domain_dot", None)) if cookie.secure: h.append(("secure", None)) if cookie.expires: h.append(("expires", time2isoz(float(cookie.expires)))) if cookie.discard: h.append(("discard", None)) if cookie.comment: h.append(("comment", cookie.comment)) if cookie.comment_url: h.append(("commenturl", cookie.comment_url)) keys = sorted(cookie._rest.keys()) for k in keys: h.append((k, str(cookie._rest[k]))) h.append(("version", str(cookie.version))) return join_header_words([h]) class LWPCookieJar(FileCookieJar): """ The LWPCookieJar saves a sequence of "Set-Cookie3" lines. "Set-Cookie3" is the format used by the libwww-perl library, not known to be compatible with any browser, but which is easy to read and doesn't lose information about RFC 2965 cookies. 
Additional methods as_lwp_str(ignore_discard=True, ignore_expired=True) """ def as_lwp_str(self, ignore_discard=True, ignore_expires=True): """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers. ignore_discard and ignore_expires: see docstring for FileCookieJar.save """ now = time.time() r = [] for cookie in self: if not ignore_discard and cookie.discard: continue if not ignore_expires and cookie.is_expired(now): continue r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie)) return "\n".join(r+[""]) def save(self, filename=None, ignore_discard=False, ignore_expires=False): if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) with open(filename, "w") as f: # There really isn't an LWP Cookies 2.0 format, but this indicates # that there is extra information in here (domain_dot and # port_spec) while still being compatible with libwww-perl, I hope. f.write("#LWP-Cookies-2.0\n") f.write(self.as_lwp_str(ignore_discard, ignore_expires)) def _really_load(self, f, filename, ignore_discard, ignore_expires): magic = f.readline() if not self.magic_re.search(magic): msg = ("%r does not look like a Set-Cookie3 (LWP) format " "file" % filename) raise LoadError(msg) now = time.time() header = "Set-Cookie3:" boolean_attrs = ("port_spec", "path_spec", "domain_dot", "secure", "discard") value_attrs = ("version", "port", "path", "domain", "expires", "comment", "commenturl") try: while 1: line = f.readline() if line == "": break if not line.startswith(header): continue line = line[len(header):].strip() for data in split_header_words([line]): name, value = data[0] standard = {} rest = {} for k in boolean_attrs: standard[k] = False for k, v in data[1:]: if k is not None: lc = k.lower() else: lc = None # don't lose case distinction for unknown fields if (lc in value_attrs) or (lc in boolean_attrs): k = lc if k in boolean_attrs: if v is None: v = True standard[k] = v elif k in value_attrs: standard[k] = v else: rest[k] = v h = standard.get expires = h("expires") discard = h("discard") if expires is not None: expires = iso2time(expires) if expires is None: discard = True domain = h("domain") domain_specified = domain.startswith(".") c = Cookie(h("version"), name, value, h("port"), h("port_spec"), domain, domain_specified, h("domain_dot"), h("path"), h("path_spec"), h("secure"), expires, discard, h("comment"), h("commenturl"), rest) if not ignore_discard and c.discard: continue if not ignore_expires and c.is_expired(now): continue self.set_cookie(c) except OSError: raise except Exception: _warn_unhandled_exception() raise LoadError("invalid Set-Cookie3 format file %r: %r" % (filename, line)) class MozillaCookieJar(FileCookieJar): """ WARNING: you may want to backup your browser's cookies file if you use this class to save cookies. I *think* it works, but there have been bugs in the past! This class differs from CookieJar only in the format it uses to save and load cookies to and from a file. This class uses the Mozilla/Netscape `cookies.txt' format. lynx uses this file format, too. Don't expect cookies saved while the browser is running to be noticed by the browser (in fact, Mozilla on unix will overwrite your saved cookies if you change them on disk while it's running; on Windows, you probably can't save at all while the browser is running). Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to Netscape cookies on saving. 
In particular, the cookie version and port number information is lost, together with information about whether or not Path, Port and Discard were specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the domain as set in the HTTP header started with a dot (yes, I'm aware some domains in Netscape files start with a dot and some don't -- trust me, you really don't want to know any more about this). Note that though Mozilla and Netscape use the same format, they use slightly different headers. The class saves cookies using the Netscape header by default (Mozilla can cope with that). """ magic_re = re.compile("#( Netscape)? HTTP Cookie File") header = """\ # Netscape HTTP Cookie File # http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. """ def _really_load(self, f, filename, ignore_discard, ignore_expires): now = time.time() magic = f.readline() if not self.magic_re.search(magic): raise LoadError( "%r does not look like a Netscape format cookies file" % filename) try: while 1: line = f.readline() if line == "": break # last field may be absent, so keep any trailing tab if line.endswith("\n"): line = line[:-1] # skip comments and blank lines XXX what is $ for? if (line.strip().startswith(("#", "$")) or line.strip() == ""): continue domain, domain_specified, path, secure, expires, name, value = \ line.split("\t") secure = (secure == "TRUE") domain_specified = (domain_specified == "TRUE") if name == "": # cookies.txt regards 'Set-Cookie: foo' as a cookie # with no name, whereas http.cookiejar regards it as a # cookie with no value. name = value value = None initial_dot = domain.startswith(".") assert domain_specified == initial_dot discard = False if expires == "": expires = None discard = True # assume path_specified is false c = Cookie(0, name, value, None, False, domain, domain_specified, initial_dot, path, False, secure, expires, discard, None, None, {}) if not ignore_discard and c.discard: continue if not ignore_expires and c.is_expired(now): continue self.set_cookie(c) except OSError: raise except Exception: _warn_unhandled_exception() raise LoadError("invalid Netscape format cookies file %r: %r" % (filename, line)) def save(self, filename=None, ignore_discard=False, ignore_expires=False): if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) with open(filename, "w") as f: f.write(self.header) now = time.time() for cookie in self: if not ignore_discard and cookie.discard: continue if not ignore_expires and cookie.is_expired(now): continue if cookie.secure: secure = "TRUE" else: secure = "FALSE" if cookie.domain.startswith("."): initial_dot = "TRUE" else: initial_dot = "FALSE" if cookie.expires is not None: expires = str(cookie.expires) else: expires = "" if cookie.value is None: # cookies.txt regards 'Set-Cookie: foo' as a cookie # with no name, whereas http.cookiejar regards it as a # cookie with no value. name = "" value = cookie.name else: name = cookie.name value = cookie.value f.write( "\t".join([cookie.domain, initial_dot, cookie.path, secure, expires, name, value])+ "\n") eventlet-0.30.2/eventlet/green/http/cookies.py0000644000076500000240000005717414006212666022010 0ustar temotostaff00000000000000# This is part of Python source code with Eventlet-specific modifications. 
# # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved # # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 # -------------------------------------------- # # 1. This LICENSE AGREEMENT is between the Python Software Foundation # ("PSF"), and the Individual or Organization ("Licensee") accessing and # otherwise using this software ("Python") in source or binary form and # its associated documentation. # # 2. Subject to the terms and conditions of this License Agreement, PSF hereby # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, # analyze, test, perform and/or display publicly, prepare derivative works, # distribute, and otherwise use Python alone or in any derivative version, # provided, however, that PSF's License Agreement and PSF's notice of copyright, # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved" are retained in Python alone or in any derivative version prepared by # Licensee. # # 3. In the event Licensee prepares a derivative work that is based on # or incorporates Python or any part thereof, and wants to make # the derivative work available to others as provided herein, then # Licensee hereby agrees to include in any such work a brief summary of # the changes made to Python. # # 4. PSF is making Python available to Licensee on an "AS IS" # basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR # IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT # INFRINGE ANY THIRD PARTY RIGHTS. # # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. # # 6. This License Agreement will automatically terminate upon a material # breach of its terms and conditions. # # 7. Nothing in this License Agreement shall be deemed to create any # relationship of agency, partnership, or joint venture between PSF and # Licensee. This License Agreement does not grant permission to use PSF # trademarks or trade name in a trademark sense to endorse or promote # products or services of Licensee, or any third party. # # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. #### # Copyright 2000 by Timothy O'Malley # # All Rights Reserved # # Permission to use, copy, modify, and distribute this software # and its documentation for any purpose and without fee is hereby # granted, provided that the above copyright notice appear in all # copies and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Timothy O'Malley not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. 
# # Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS # SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR # ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. # #### # # Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp # by Timothy O'Malley # # Cookie.py is a Python module for the handling of HTTP # cookies as a Python dictionary. See RFC 2109 for more # information on cookies. # # The original idea to treat Cookies as a dictionary came from # Dave Mitchell (davem@magnet.com) in 1995, when he released the # first version of nscookie.py. # #### r""" Here's a sample session to show how to use this module. At the moment, this is the only documentation. The Basics ---------- Importing is easy... >>> from http import cookies Most of the time you start by creating a cookie. >>> C = cookies.SimpleCookie() Once you've created your Cookie, you can add values just as if it were a dictionary. >>> C = cookies.SimpleCookie() >>> C["fig"] = "newton" >>> C["sugar"] = "wafer" >>> C.output() 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer' Notice that the printable representation of a Cookie is the appropriate format for a Set-Cookie: header. This is the default behavior. You can change the header and printed attributes by using the .output() function >>> C = cookies.SimpleCookie() >>> C["rocky"] = "road" >>> C["rocky"]["path"] = "/cookie" >>> print(C.output(header="Cookie:")) Cookie: rocky=road; Path=/cookie >>> print(C.output(attrs=[], header="Cookie:")) Cookie: rocky=road The load() method of a Cookie extracts cookies from a string. In a CGI script, you would use this method to extract the cookies from the HTTP_COOKIE environment variable. >>> C = cookies.SimpleCookie() >>> C.load("chips=ahoy; vienna=finger") >>> C.output() 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger' The load() method is darn-tootin smart about identifying cookies within a string. Escaped quotation marks, nested semicolons, and other such trickeries do not confuse it. >>> C = cookies.SimpleCookie() >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";') >>> print(C) Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;" Each element of the Cookie also supports all of the RFC 2109 Cookie attributes. Here's an example which sets the Path attribute. >>> C = cookies.SimpleCookie() >>> C["oreo"] = "doublestuff" >>> C["oreo"]["path"] = "/" >>> print(C) Set-Cookie: oreo=doublestuff; Path=/ Each dictionary element has a 'value' attribute, which gives you back the value associated with the key. >>> C = cookies.SimpleCookie() >>> C["twix"] = "none for you" >>> C["twix"].value 'none for you' The SimpleCookie expects that all values should be standard strings. Just to be sure, SimpleCookie invokes the str() builtin to convert the value to a string, when the values are set dictionary-style. >>> C = cookies.SimpleCookie() >>> C["number"] = 7 >>> C["string"] = "seven" >>> C["number"].value '7' >>> C["string"].value 'seven' >>> C.output() 'Set-Cookie: number=7\r\nSet-Cookie: string=seven' Finis. 
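One more illustration, in the same spirit as the examples above (the names and
values here are made up): integer Max-Age values are written out unchanged,
using the attribute's traditional capitalisation.

   >>> C = cookies.SimpleCookie()
   >>> C["session"] = "token"
   >>> C["session"]["max-age"] = 3600
   >>> print(C)
   Set-Cookie: session=token; Max-Age=3600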
""" # # Import our required modules # import re import string __all__ = ["CookieError", "BaseCookie", "SimpleCookie"] _nulljoin = ''.join _semispacejoin = '; '.join _spacejoin = ' '.join def _warn_deprecated_setter(setter): import warnings msg = ('The .%s setter is deprecated. The attribute will be read-only in ' 'future releases. Please use the set() method instead.' % setter) warnings.warn(msg, DeprecationWarning, stacklevel=3) # # Define an exception visible to External modules # class CookieError(Exception): pass # These quoting routines conform to the RFC2109 specification, which in # turn references the character definitions from RFC2068. They provide # a two-way quoting algorithm. Any non-text character is translated # into a 4 character sequence: a forward-slash followed by the # three-digit octal equivalent of the character. Any '\' or '"' is # quoted with a preceding '\' slash. # Because of the way browsers really handle cookies (as opposed to what # the RFC says) we also encode "," and ";". # # These are taken from RFC2068 and RFC2109. # _LegalChars is the list of chars which don't require "'s # _Translator hash-table for fast quoting # _LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:" _UnescapedChars = _LegalChars + ' ()/<=>?@[]{}' _Translator = {n: '\\%03o' % n for n in set(range(256)) - set(map(ord, _UnescapedChars))} _Translator.update({ ord('"'): '\\"', ord('\\'): '\\\\', }) # Eventlet change: match used instead of fullmatch for Python 3.3 compatibility _is_legal_key = re.compile(r'[%s]+\Z' % re.escape(_LegalChars)).match def _quote(str): r"""Quote a string for use in a cookie header. If the string does not need to be double-quoted, then just return the string. Otherwise, surround the string in doublequotes and quote (with a \) special characters. """ if str is None or _is_legal_key(str): return str else: return '"' + str.translate(_Translator) + '"' _OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") _QuotePatt = re.compile(r"[\\].") def _unquote(str): # If there aren't any doublequotes, # then there can't be any special characters. See RFC 2109. if str is None or len(str) < 2: return str if str[0] != '"' or str[-1] != '"': return str # We have to assume that we must decode this string. # Down to work. # Remove the "s str = str[1:-1] # Check for special sequences. Examples: # \012 --> \n # \" --> " # i = 0 n = len(str) res = [] while 0 <= i < n: o_match = _OctalPatt.search(str, i) q_match = _QuotePatt.search(str, i) if not o_match and not q_match: # Neither matched res.append(str[i:]) break # else: j = k = -1 if o_match: j = o_match.start(0) if q_match: k = q_match.start(0) if q_match and (not o_match or k < j): # QuotePatt matched res.append(str[i:k]) res.append(str[k+1]) i = k + 2 else: # OctalPatt matched res.append(str[i:j]) res.append(chr(int(str[j+1:j+4], 8))) i = j + 4 return _nulljoin(res) # The _getdate() routine is used to set the expiration time in the cookie's HTTP # header. By default, _getdate() returns the current time in the appropriate # "expires" format for a Set-Cookie header. The one optional argument is an # offset from now, in seconds. For example, an offset of -3600 means "one hour # ago". The offset may be a floating point number. 
# _weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] _monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname): from eventlet.green.time import gmtime, time now = time() year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ (weekdayname[wd], day, monthname[month], year, hh, mm, ss) class Morsel(dict): """A class to hold ONE (key, value) pair. In a cookie, each such pair may have several attributes, so this class is used to keep the attributes associated with the appropriate key,value pair. This class also includes a coded_value attribute, which is used to hold the network representation of the value. This is most useful when Python objects are pickled for network transit. """ # RFC 2109 lists these attributes as reserved: # path comment domain # max-age secure version # # For historical reasons, these attributes are also reserved: # expires # # This is an extension from Microsoft: # httponly # # This dictionary provides a mapping from the lowercase # variant on the left to the appropriate traditional # formatting on the right. _reserved = { "expires" : "expires", "path" : "Path", "comment" : "Comment", "domain" : "Domain", "max-age" : "Max-Age", "secure" : "Secure", "httponly" : "HttpOnly", "version" : "Version", } _flags = {'secure', 'httponly'} def __init__(self): # Set defaults self._key = self._value = self._coded_value = None # Set default attributes for key in self._reserved: dict.__setitem__(self, key, "") @property def key(self): return self._key @key.setter def key(self, key): _warn_deprecated_setter('key') self._key = key @property def value(self): return self._value @value.setter def value(self, value): _warn_deprecated_setter('value') self._value = value @property def coded_value(self): return self._coded_value @coded_value.setter def coded_value(self, coded_value): _warn_deprecated_setter('coded_value') self._coded_value = coded_value def __setitem__(self, K, V): K = K.lower() if not K in self._reserved: raise CookieError("Invalid attribute %r" % (K,)) dict.__setitem__(self, K, V) def setdefault(self, key, val=None): key = key.lower() if key not in self._reserved: raise CookieError("Invalid attribute %r" % (key,)) return dict.setdefault(self, key, val) def __eq__(self, morsel): if not isinstance(morsel, Morsel): return NotImplemented return (dict.__eq__(self, morsel) and self._value == morsel._value and self._key == morsel._key and self._coded_value == morsel._coded_value) __ne__ = object.__ne__ def copy(self): morsel = Morsel() dict.update(morsel, self) morsel.__dict__.update(self.__dict__) return morsel def update(self, values): data = {} for key, val in dict(values).items(): key = key.lower() if key not in self._reserved: raise CookieError("Invalid attribute %r" % (key,)) data[key] = val dict.update(self, data) def isReservedKey(self, K): return K.lower() in self._reserved def set(self, key, val, coded_val, LegalChars=_LegalChars): if LegalChars != _LegalChars: import warnings warnings.warn( 'LegalChars parameter is deprecated, ignored and will ' 'be removed in future versions.', DeprecationWarning, stacklevel=2) if key.lower() in self._reserved: raise CookieError('Attempt to set a reserved key %r' % (key,)) if not _is_legal_key(key): raise CookieError('Illegal key %r' % (key,)) # It's a good key, so save it. 
self._key = key self._value = val self._coded_value = coded_val def __getstate__(self): return { 'key': self._key, 'value': self._value, 'coded_value': self._coded_value, } def __setstate__(self, state): self._key = state['key'] self._value = state['value'] self._coded_value = state['coded_value'] def output(self, attrs=None, header="Set-Cookie:"): return "%s %s" % (header, self.OutputString(attrs)) __str__ = output def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.OutputString()) def js_output(self, attrs=None): # Print javascript return """ """ % (self.OutputString(attrs).replace('"', r'\"')) def OutputString(self, attrs=None): # Build up our result # result = [] append = result.append # First, the key=value pair append("%s=%s" % (self.key, self.coded_value)) # Now add any defined attributes if attrs is None: attrs = self._reserved items = sorted(self.items()) for key, value in items: if value == "": continue if key not in attrs: continue if key == "expires" and isinstance(value, int): append("%s=%s" % (self._reserved[key], _getdate(value))) elif key == "max-age" and isinstance(value, int): append("%s=%d" % (self._reserved[key], value)) elif key in self._flags: if value: append(str(self._reserved[key])) else: append("%s=%s" % (self._reserved[key], value)) # Return the result return _semispacejoin(result) # # Pattern for finding cookie # # This used to be strict parsing based on the RFC2109 and RFC2068 # specifications. I have since discovered that MSIE 3.0x doesn't # follow the character rules outlined in those specs. As a # result, the parsing rules here are less strict. # _LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=" _LegalValueChars = _LegalKeyChars + '\[\]' _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern \s* # Optional whitespace at start of cookie (?P # Start of group 'key' [""" + _LegalKeyChars + r"""]+? # Any word of at least one letter ) # End of group 'key' ( # Optional group: there may not be a value. \s*=\s* # Equal Sign (?P # Start of group 'val' "(?:[^\\"]|\\.)*" # Any doublequoted string | # or \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr | # or [""" + _LegalValueChars + r"""]* # Any word or empty string ) # End of group 'val' )? # End of optional value group \s* # Any number of spaces. (\s+|;|$) # Ending either at space, semicolon, or EOS. """, re.ASCII) # May be removed if safe. # At long last, here is the cookie class. Using this class is almost just like # using a dictionary. See this module's docstring for example usage. # class BaseCookie(dict): """A container class for a set of Morsels.""" def value_decode(self, val): """real_value, coded_value = value_decode(STRING) Called prior to setting a cookie's value from the network representation. The VALUE is the value read from HTTP header. Override this function to modify the behavior of cookies. """ return val, val def value_encode(self, val): """real_value, coded_value = value_encode(VALUE) Called prior to setting a cookie's value from the dictionary representation. The VALUE is the value being assigned. Override this function to modify the behavior of cookies. 
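        For example, a subclass could JSON-encode values (an illustrative
        sketch only, not a class shipped with this module; it assumes the
        standard json module has been imported):

            class JSONCookie(BaseCookie):
                def value_decode(self, val):
                    return json.loads(_unquote(val)), val
                def value_encode(self, val):
                    coded = _quote(json.dumps(val))
                    return val, coded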
""" strval = str(val) return strval, strval def __init__(self, input=None): if input: self.load(input) def __set(self, key, real_value, coded_value): """Private method for setting a cookie's value""" M = self.get(key, Morsel()) M.set(key, real_value, coded_value) dict.__setitem__(self, key, M) def __setitem__(self, key, value): """Dictionary style assignment.""" if isinstance(value, Morsel): # allow assignment of constructed Morsels (e.g. for pickling) dict.__setitem__(self, key, value) else: rval, cval = self.value_encode(value) self.__set(key, rval, cval) def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): """Return a string suitable for HTTP.""" result = [] items = sorted(self.items()) for key, value in items: result.append(value.output(attrs, header)) return sep.join(result) __str__ = output def __repr__(self): l = [] items = sorted(self.items()) for key, value in items: l.append('%s=%s' % (key, repr(value.value))) return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l)) def js_output(self, attrs=None): """Return a string suitable for JavaScript.""" result = [] items = sorted(self.items()) for key, value in items: result.append(value.js_output(attrs)) return _nulljoin(result) def load(self, rawdata): """Load cookies from a string (presumably HTTP_COOKIE) or from a dictionary. Loading cookies from a dictionary 'd' is equivalent to calling: map(Cookie.__setitem__, d.keys(), d.values()) """ if isinstance(rawdata, str): self.__parse_string(rawdata) else: # self.update() wouldn't call our custom __setitem__ for key, value in rawdata.items(): self[key] = value return def __parse_string(self, str, patt=_CookiePattern): i = 0 # Our starting point n = len(str) # Length of string parsed_items = [] # Parsed (type, key, value) triples morsel_seen = False # A key=value pair was previously encountered TYPE_ATTRIBUTE = 1 TYPE_KEYVALUE = 2 # We first parse the whole cookie string and reject it if it's # syntactically invalid (this helps avoid some classes of injection # attacks). while 0 <= i < n: # Start looking for a cookie match = patt.match(str, i) if not match: # No more cookies break key, value = match.group("key"), match.group("val") i = match.end(0) if key[0] == "$": if not morsel_seen: # We ignore attributes which pertain to the cookie # mechanism as a whole, such as "$Version". # See RFC 2965. (Does anyone care?) continue parsed_items.append((TYPE_ATTRIBUTE, key[1:], value)) elif key.lower() in Morsel._reserved: if not morsel_seen: # Invalid cookie string return if value is None: if key.lower() in Morsel._flags: parsed_items.append((TYPE_ATTRIBUTE, key, True)) else: # Invalid cookie string return else: parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value))) elif value is not None: parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value))) morsel_seen = True else: # Invalid cookie string return # The cookie string is valid, apply it. M = None # current morsel for tp, key, value in parsed_items: if tp == TYPE_ATTRIBUTE: assert M is not None M[key] = value else: assert tp == TYPE_KEYVALUE rval, cval = value self.__set(key, rval, cval) M = self[key] class SimpleCookie(BaseCookie): """ SimpleCookie supports strings as cookie values. When setting the value using the dictionary assignment notation, SimpleCookie calls the builtin str() to convert the value to a string. Values received from HTTP are kept as strings. 
""" def value_decode(self, val): return _unquote(val), val def value_encode(self, val): strval = str(val) return strval, _quote(strval) eventlet-0.30.2/eventlet/green/http/server.py0000644000076500000240000013300414006212666021645 0ustar temotostaff00000000000000# This is part of Python source code with Eventlet-specific modifications. # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved # # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 # -------------------------------------------- # # 1. This LICENSE AGREEMENT is between the Python Software Foundation # ("PSF"), and the Individual or Organization ("Licensee") accessing and # otherwise using this software ("Python") in source or binary form and # its associated documentation. # # 2. Subject to the terms and conditions of this License Agreement, PSF hereby # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, # analyze, test, perform and/or display publicly, prepare derivative works, # distribute, and otherwise use Python alone or in any derivative version, # provided, however, that PSF's License Agreement and PSF's notice of copyright, # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights # Reserved" are retained in Python alone or in any derivative version prepared by # Licensee. # # 3. In the event Licensee prepares a derivative work that is based on # or incorporates Python or any part thereof, and wants to make # the derivative work available to others as provided herein, then # Licensee hereby agrees to include in any such work a brief summary of # the changes made to Python. # # 4. PSF is making Python available to Licensee on an "AS IS" # basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR # IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT # INFRINGE ANY THIRD PARTY RIGHTS. # # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. # # 6. This License Agreement will automatically terminate upon a material # breach of its terms and conditions. # # 7. Nothing in this License Agreement shall be deemed to create any # relationship of agency, partnership, or joint venture between PSF and # Licensee. This License Agreement does not grant permission to use PSF # trademarks or trade name in a trademark sense to endorse or promote # products or services of Licensee, or any third party. # # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. """HTTP server classes. Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, and CGIHTTPRequestHandler for CGI scripts. It does, however, optionally implement HTTP/1.1 persistent connections, as of version 0.3. Notes on CGIHTTPRequestHandler ------------------------------ This class implements GET and POST requests to cgi-bin scripts. If the os.fork() function is not present (e.g. 
on Windows), subprocess.Popen() is used as a fallback, with slightly altered semantics. In all cases, the implementation is intentionally naive -- all requests are executed synchronously. SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL -- it may execute arbitrary Python code or external programs. Note that status code 200 is sent prior to execution of a CGI script, so scripts cannot send other status codes such as 302 (redirect). XXX To do: - log requests even later (to capture byte count) - log user-agent header and other interesting goodies - send error log to separate file """ # See also: # # HTTP Working Group T. Berners-Lee # INTERNET-DRAFT R. T. Fielding # H. Frystyk Nielsen # Expires September 8, 1995 March 8, 1995 # # URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt # # and # # Network Working Group R. Fielding # Request for Comments: 2616 et al # Obsoletes: 2068 June 1999 # Category: Standards Track # # URL: http://www.faqs.org/rfcs/rfc2616.html # Log files # --------- # # Here's a quote from the NCSA httpd docs about log file format. # # | The logfile format is as follows. Each line consists of: # | # | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb # | # | host: Either the DNS name or the IP number of the remote client # | rfc931: Any information returned by identd for this person, # | - otherwise. # | authuser: If user sent a userid for authentication, the user name, # | - otherwise. # | DD: Day # | Mon: Month (calendar name) # | YYYY: Year # | hh: hour (24-hour format, the machine's timezone) # | mm: minutes # | ss: seconds # | request: The first line of the HTTP request as sent by the client. # | ddd: the status code returned by the server, - if not available. # | bbbb: the total number of bytes sent, # | *not including the HTTP/1.0 header*, - if not available # | # | You can determine the name of the file accessed through request. # # (Actually, the latter is only true if you know the server configuration # at the time the request was made!) __version__ = "0.6" __all__ = [ "HTTPServer", "BaseHTTPRequestHandler", "SimpleHTTPRequestHandler", "CGIHTTPRequestHandler", ] import email.utils import html import io import mimetypes import posixpath import shutil import sys import urllib.parse import copy import argparse from eventlet.green import ( os, time, select, socket, SocketServer as socketserver, subprocess, ) from eventlet.green.http import client as http_client, HTTPStatus # Default error message template DEFAULT_ERROR_MESSAGE = """\ Error response
</title>
    </head>
    <body>
        <h1>Error response</h1>
        <p>Error code: %(code)d</p>
        <p>Message: %(message)s.</p>
        <p>Error code explanation: %(code)s - %(explain)s.</p>
    </body>
</html>
""" DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8" class HTTPServer(socketserver.TCPServer): allow_reuse_address = 1 # Seems to make sense in testing environment def server_bind(self): """Override server_bind to store the server name.""" socketserver.TCPServer.server_bind(self) host, port = self.server_address[:2] self.server_name = socket.getfqdn(host) self.server_port = port class BaseHTTPRequestHandler(socketserver.StreamRequestHandler): """HTTP request handler base class. The following explanation of HTTP serves to guide you through the code as well as to expose any misunderstandings I may have about HTTP (so you don't need to read the code to figure out I'm wrong :-). HTTP (HyperText Transfer Protocol) is an extensible protocol on top of a reliable stream transport (e.g. TCP/IP). The protocol recognizes three parts to a request: 1. One line identifying the request type and path 2. An optional set of RFC-822-style headers 3. An optional data part The headers and data are separated by a blank line. The first line of the request has the form where is a (case-sensitive) keyword such as GET or POST, is a string containing path information for the request, and should be the string "HTTP/1.0" or "HTTP/1.1". is encoded using the URL encoding scheme (using %xx to signify the ASCII character with hex code xx). The specification specifies that lines are separated by CRLF but for compatibility with the widest range of clients recommends servers also handle LF. Similarly, whitespace in the request line is treated sensibly (allowing multiple spaces between components and allowing trailing whitespace). Similarly, for output, lines ought to be separated by CRLF pairs but most clients grok LF characters just fine. If the first line of the request has the form (i.e. is left out) then this is assumed to be an HTTP 0.9 request; this form has no optional headers and data part and the reply consists of just the data. The reply form of the HTTP 1.x protocol again has three parts: 1. One line giving the response code 2. An optional set of RFC-822-style headers 3. The data Again, the headers and data are separated by a blank line. The response code line has the form where is the protocol version ("HTTP/1.0" or "HTTP/1.1"), is a 3-digit response code indicating success or failure of the request, and is an optional human-readable string explaining what the response code means. This server parses the request and the headers, and then calls a function specific to the request type (). Specifically, a request SPAM will be handled by a method do_SPAM(). If no such method exists the server sends an error response to the client. If it exists, it is called with no arguments: do_SPAM() Note that the request name is case sensitive (i.e. SPAM and spam are different requests). The various request details are stored in instance variables: - client_address is the client IP address in the form (host, port); - command, path and version are the broken-down request line; - headers is an instance of email.message.Message (or a derived class) containing the header information; - rfile is a file object open for reading positioned at the start of the optional input data part; - wfile is a file object open for writing. IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING! The first thing to be written must be the response line. Then follow 0 or more header lines, then a blank line, and then the actual data (if any). 
The meaning of the header lines depends on the command executed by the server; in most cases, when data is returned, there should be at least one header line of the form Content-type: / where and should be registered MIME types, e.g. "text/html" or "text/plain". """ # The Python system version, truncated to its first component. sys_version = "Python/" + sys.version.split()[0] # The server software version. You may want to override this. # The format is multiple whitespace-separated strings, # where each string is of the form name[/version]. server_version = "BaseHTTP/" + __version__ error_message_format = DEFAULT_ERROR_MESSAGE error_content_type = DEFAULT_ERROR_CONTENT_TYPE # The default request version. This only affects responses up until # the point where the request line is parsed, so it mainly decides what # the client gets back when sending a malformed request line. # Most web servers default to HTTP 0.9, i.e. don't send a status line. default_request_version = "HTTP/0.9" def parse_request(self): """Parse a request (internal). The request should be stored in self.raw_requestline; the results are in self.command, self.path, self.request_version and self.headers. Return True for success, False for failure; on failure, an error is sent back. """ self.command = None # set in case of error on the first line self.request_version = version = self.default_request_version self.close_connection = True requestline = str(self.raw_requestline, 'iso-8859-1') requestline = requestline.rstrip('\r\n') self.requestline = requestline words = requestline.split() if len(words) == 3: command, path, version = words try: if version[:5] != 'HTTP/': raise ValueError base_version_number = version.split('/', 1)[1] version_number = base_version_number.split(".") # RFC 2145 section 3.1 says there can be only one "." and # - major and minor numbers MUST be treated as # separate integers; # - HTTP/2.4 is a lower version than HTTP/2.13, which in # turn is lower than HTTP/12.3; # - Leading zeros MUST be ignored by recipients. if len(version_number) != 2: raise ValueError version_number = int(version_number[0]), int(version_number[1]) except (ValueError, IndexError): self.send_error( HTTPStatus.BAD_REQUEST, "Bad request version (%r)" % version) return False if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": self.close_connection = False if version_number >= (2, 0): self.send_error( HTTPStatus.HTTP_VERSION_NOT_SUPPORTED, "Invalid HTTP version (%s)" % base_version_number) return False elif len(words) == 2: command, path = words self.close_connection = True if command != 'GET': self.send_error( HTTPStatus.BAD_REQUEST, "Bad HTTP/0.9 request type (%r)" % command) return False elif not words: return False else: self.send_error( HTTPStatus.BAD_REQUEST, "Bad request syntax (%r)" % requestline) return False self.command, self.path, self.request_version = command, path, version # Examine the headers and look for a Connection directive. 
try: self.headers = http_client.parse_headers(self.rfile, _class=self.MessageClass) except http_client.LineTooLong as err: self.send_error( HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, "Line too long", str(err)) return False except http_client.HTTPException as err: self.send_error( HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, "Too many headers", str(err) ) return False conntype = self.headers.get('Connection', "") if conntype.lower() == 'close': self.close_connection = True elif (conntype.lower() == 'keep-alive' and self.protocol_version >= "HTTP/1.1"): self.close_connection = False # Examine the headers and look for an Expect directive expect = self.headers.get('Expect', "") if (expect.lower() == "100-continue" and self.protocol_version >= "HTTP/1.1" and self.request_version >= "HTTP/1.1"): if not self.handle_expect_100(): return False return True def handle_expect_100(self): """Decide what to do with an "Expect: 100-continue" header. If the client is expecting a 100 Continue response, we must respond with either a 100 Continue or a final response before waiting for the request body. The default is to always respond with a 100 Continue. You can behave differently (for example, reject unauthorized requests) by overriding this method. This method should either return True (possibly after sending a 100 Continue response) or send an error response and return False. """ self.send_response_only(HTTPStatus.CONTINUE) self.end_headers() return True def handle_one_request(self): """Handle a single HTTP request. You normally don't need to override this method; see the class __doc__ string for information on how to handle specific HTTP commands such as GET and POST. """ try: self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG) return if not self.raw_requestline: self.close_connection = True return if not self.parse_request(): # An error code has been sent, just exit return mname = 'do_' + self.command if not hasattr(self, mname): self.send_error( HTTPStatus.NOT_IMPLEMENTED, "Unsupported method (%r)" % self.command) return method = getattr(self, mname) method() self.wfile.flush() #actually send the response if not already done. except socket.timeout as e: #a read or a write timed out. Discard this connection self.log_error("Request timed out: %r", e) self.close_connection = True return def handle(self): """Handle multiple requests if necessary.""" self.close_connection = True self.handle_one_request() while not self.close_connection: self.handle_one_request() def send_error(self, code, message=None, explain=None): """Send and log an error reply. Arguments are * code: an HTTP error code 3 digits * message: a simple optional 1 line reason phrase. *( HTAB / SP / VCHAR / %x80-FF ) defaults to short entry matching the response code * explain: a detailed message defaults to the long entry matching the response code. This sends an error response (so it must be called before any output has been generated), logs the error, and finally sends a piece of HTML explaining the error to the user. """ try: shortmsg, longmsg = self.responses[code] except KeyError: shortmsg, longmsg = '???', '???' if message is None: message = shortmsg if explain is None: explain = longmsg self.log_error("code %d, message %s", code, message) self.send_response(code, message) self.send_header('Connection', 'close') # Message body is omitted for cases described in: # - RFC7230: 3.3. 
1xx, 204(No Content), 304(Not Modified) # - RFC7231: 6.3.6. 205(Reset Content) body = None if (code >= 200 and code not in (HTTPStatus.NO_CONTENT, HTTPStatus.RESET_CONTENT, HTTPStatus.NOT_MODIFIED)): # HTML encode to prevent Cross Site Scripting attacks # (see bug #1100201) content = (self.error_message_format % { 'code': code, 'message': html.escape(message, quote=False), 'explain': html.escape(explain, quote=False) }) body = content.encode('UTF-8', 'replace') self.send_header("Content-Type", self.error_content_type) self.send_header('Content-Length', int(len(body))) self.end_headers() if self.command != 'HEAD' and body: self.wfile.write(body) def send_response(self, code, message=None): """Add the response header to the headers buffer and log the response code. Also send two standard headers with the server software version and the current date. """ self.log_request(code) self.send_response_only(code, message) self.send_header('Server', self.version_string()) self.send_header('Date', self.date_time_string()) def send_response_only(self, code, message=None): """Send the response header only.""" if self.request_version != 'HTTP/0.9': if message is None: if code in self.responses: message = self.responses[code][0] else: message = '' if not hasattr(self, '_headers_buffer'): self._headers_buffer = [] self._headers_buffer.append(("%s %d %s\r\n" % (self.protocol_version, code, message)).encode( 'latin-1', 'strict')) def send_header(self, keyword, value): """Send a MIME header to the headers buffer.""" if self.request_version != 'HTTP/0.9': if not hasattr(self, '_headers_buffer'): self._headers_buffer = [] self._headers_buffer.append( ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) if keyword.lower() == 'connection': if value.lower() == 'close': self.close_connection = True elif value.lower() == 'keep-alive': self.close_connection = False def end_headers(self): """Send the blank line ending the MIME headers.""" if self.request_version != 'HTTP/0.9': self._headers_buffer.append(b"\r\n") self.flush_headers() def flush_headers(self): if hasattr(self, '_headers_buffer'): self.wfile.write(b"".join(self._headers_buffer)) self._headers_buffer = [] def log_request(self, code='-', size='-'): """Log an accepted request. This is called by send_response(). """ if isinstance(code, HTTPStatus): code = code.value self.log_message('"%s" %s %s', self.requestline, str(code), str(size)) def log_error(self, format, *args): """Log an error. This is called when a request cannot be fulfilled. By default it passes the message on to log_message(). Arguments are the same as for log_message(). XXX This should go to the separate error log. """ self.log_message(format, *args) def log_message(self, format, *args): """Log an arbitrary message. This is used by all other logging functions. Override it if you have specific logging wishes. The first argument, FORMAT, is a format string for the message to be logged. If the format string contains any % escapes requiring parameters, they should be specified as subsequent arguments (it's just like printf!). The client ip and current date/time are prefixed to every message. 
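        For example, a successful request is typically logged to stderr as a
        line of the form (illustrative values):

            127.0.0.1 - - [06/Feb/2021 12:00:00] "GET / HTTP/1.1" 200 -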
""" sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args)) def version_string(self): """Return the server software version string.""" return self.server_version + ' ' + self.sys_version def date_time_string(self, timestamp=None): """Return the current date and time formatted for a message header.""" if timestamp is None: timestamp = time.time() return email.utils.formatdate(timestamp, usegmt=True) def log_date_time_string(self): """Return the current time formatted for logging.""" now = time.time() year, month, day, hh, mm, ss, x, y, z = time.localtime(now) s = "%02d/%3s/%04d %02d:%02d:%02d" % ( day, self.monthname[month], year, hh, mm, ss) return s weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def address_string(self): """Return the client address.""" return self.client_address[0] # Essentially static class variables # The version of the HTTP protocol we support. # Set this to HTTP/1.1 to enable automatic keepalive protocol_version = "HTTP/1.0" # MessageClass used to parse headers MessageClass = http_client.HTTPMessage # hack to maintain backwards compatibility responses = { v: (v.phrase, v.description) for v in HTTPStatus.__members__.values() } class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): """Simple HTTP request handler with GET and HEAD commands. This serves files from the current directory and any of its subdirectories. The MIME type for files is determined by calling the .guess_type() method. The GET and HEAD requests are identical except that the HEAD request omits the actual contents of the file. """ server_version = "SimpleHTTP/" + __version__ def do_GET(self): """Serve a GET request.""" f = self.send_head() if f: try: self.copyfile(f, self.wfile) finally: f.close() def do_HEAD(self): """Serve a HEAD request.""" f = self.send_head() if f: f.close() def send_head(self): """Common code for GET and HEAD commands. This sends the response code and MIME headers. Return value is either a file object (which has to be copied to the outputfile by the caller unless the command was HEAD, and must be closed by the caller under all circumstances), or None, in which case the caller has nothing further to do. """ path = self.translate_path(self.path) f = None if os.path.isdir(path): parts = urllib.parse.urlsplit(self.path) if not parts.path.endswith('/'): # redirect browser - doing basically what apache does self.send_response(HTTPStatus.MOVED_PERMANENTLY) new_parts = (parts[0], parts[1], parts[2] + '/', parts[3], parts[4]) new_url = urllib.parse.urlunsplit(new_parts) self.send_header("Location", new_url) self.end_headers() return None for index in "index.html", "index.htm": index = os.path.join(path, index) if os.path.exists(index): path = index break else: return self.list_directory(path) ctype = self.guess_type(path) try: f = open(path, 'rb') except OSError: self.send_error(HTTPStatus.NOT_FOUND, "File not found") return None try: self.send_response(HTTPStatus.OK) self.send_header("Content-type", ctype) fs = os.fstat(f.fileno()) self.send_header("Content-Length", str(fs[6])) self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) self.end_headers() return f except: f.close() raise def list_directory(self, path): """Helper to produce a directory listing (absent index.html). Return value is either a file object, or None (indicating an error). 
In either case, the headers are sent, making the interface the same as for send_head(). """ try: list = os.listdir(path) except OSError: self.send_error( HTTPStatus.NOT_FOUND, "No permission to list directory") return None list.sort(key=lambda a: a.lower()) r = [] try: displaypath = urllib.parse.unquote(self.path, errors='surrogatepass') except UnicodeDecodeError: displaypath = urllib.parse.unquote(path) displaypath = html.escape(displaypath, quote=False) enc = sys.getfilesystemencoding() title = 'Directory listing for %s' % displaypath r.append('') r.append('\n') r.append('' % enc) r.append('%s\n' % title) r.append('\n
<h1>%s</h1>' % title)
        r.append('<hr>\n<ul>')
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            r.append('<li><a href="%s">%s</a></li>'
                     % (urllib.parse.quote(linkname, errors='surrogatepass'),
                        html.escape(displayname, quote=False)))
        r.append('</ul>\n<hr>\n</body>\n</html>
\n\n\n') encoded = '\n'.join(r).encode(enc, 'surrogateescape') f = io.BytesIO() f.write(encoded) f.seek(0) self.send_response(HTTPStatus.OK) self.send_header("Content-type", "text/html; charset=%s" % enc) self.send_header("Content-Length", str(len(encoded))) self.end_headers() return f def translate_path(self, path): """Translate a /-separated PATH to the local filename syntax. Components that mean special things to the local file system (e.g. drive or directory names) are ignored. (XXX They should probably be diagnosed.) """ # abandon query parameters path = path.split('?',1)[0] path = path.split('#',1)[0] # Don't forget explicit trailing slash when normalizing. Issue17324 trailing_slash = path.rstrip().endswith('/') try: path = urllib.parse.unquote(path, errors='surrogatepass') except UnicodeDecodeError: path = urllib.parse.unquote(path) path = posixpath.normpath(path) words = path.split('/') words = filter(None, words) path = os.getcwd() for word in words: if os.path.dirname(word) or word in (os.curdir, os.pardir): # Ignore components that are not a simple file/directory name continue path = os.path.join(path, word) if trailing_slash: path += '/' return path def copyfile(self, source, outputfile): """Copy all data between two file objects. The SOURCE argument is a file object open for reading (or anything with a read() method) and the DESTINATION argument is a file object open for writing (or anything with a write() method). The only reason for overriding this would be to change the block size or perhaps to replace newlines by CRLF -- note however that this the default server uses this to copy binary data as well. """ shutil.copyfileobj(source, outputfile) def guess_type(self, path): """Guess the type of a file. Argument is a PATH (a filename). Return value is a string of the form type/subtype, usable for a MIME Content-type header. The default implementation looks the file's extension up in the table self.extensions_map, using application/octet-stream as a default; however it would be permissible (if slow) to look inside the data to make a better guess. """ base, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() if ext in self.extensions_map: return self.extensions_map[ext] else: return self.extensions_map[''] if not mimetypes.inited: mimetypes.init() # try to read system mime.types extensions_map = mimetypes.types_map.copy() extensions_map.update({ '': 'application/octet-stream', # Default '.py': 'text/plain', '.c': 'text/plain', '.h': 'text/plain', }) # Utilities for CGIHTTPRequestHandler def _url_collapse_path(path): """ Given a URL path, remove extra '/'s and '.' path elements and collapse any '..' references and returns a collapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. The utility of this function is limited to is_cgi method and helps preventing some security attacks. Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. """ # Query component should not be involved. path, _, query = path.partition('?') path = urllib.parse.unquote(path) # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') head_parts = [] for part in path_parts[:-1]: if part == '..': head_parts.pop() # IndexError if more '..' 
than prior parts elif part and part != '.': head_parts.append( part ) if path_parts: tail_part = path_parts.pop() if tail_part: if tail_part == '..': head_parts.pop() tail_part = '' elif tail_part == '.': tail_part = '' else: tail_part = '' if query: tail_part = '?'.join((tail_part, query)) splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) return collapsed_path nobody = None def nobody_uid(): """Internal routine to get nobody's uid""" global nobody if nobody: return nobody try: import pwd except ImportError: return -1 try: nobody = pwd.getpwnam('nobody')[2] except KeyError: nobody = 1 + max(x[2] for x in pwd.getpwall()) return nobody def executable(path): """Test for executable file.""" return os.access(path, os.X_OK) class CGIHTTPRequestHandler(SimpleHTTPRequestHandler): """Complete HTTP server with GET, HEAD and POST commands. GET and HEAD also support running CGI scripts. The POST command is *only* implemented for CGI scripts. """ # Determine platform specifics have_fork = hasattr(os, 'fork') # Make rfile unbuffered -- we need to read one line and then pass # the rest to a subprocess, so we can't use buffered input. rbufsize = 0 def do_POST(self): """Serve a POST request. This is only implemented for CGI scripts. """ if self.is_cgi(): self.run_cgi() else: self.send_error( HTTPStatus.NOT_IMPLEMENTED, "Can only POST to CGI scripts") def send_head(self): """Version of send_head that support CGI scripts""" if self.is_cgi(): return self.run_cgi() else: return SimpleHTTPRequestHandler.send_head(self) def is_cgi(self): """Test whether self.path corresponds to a CGI script. Returns True and updates the cgi_info attribute to the tuple (dir, rest) if self.path requires running a CGI script. Returns False otherwise. If any exception is raised, the caller should assume that self.path was rejected as invalid and act accordingly. The default implementation tests whether the normalized url path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: self.cgi_info = head, tail return True return False cgi_directories = ['/cgi-bin', '/htbin'] def is_executable(self, path): """Test whether argument path is an executable file.""" return executable(path) def is_python(self, path): """Test whether argument path is a Python script.""" head, tail = os.path.splitext(path) return tail.lower() in (".py", ".pyw") def run_cgi(self): """Execute a CGI script.""" dir, rest = self.cgi_info path = dir + '/' + rest i = path.find('/', len(dir)+1) while i >= 0: nextdir = path[:i] nextrest = path[i+1:] scriptdir = self.translate_path(nextdir) if os.path.isdir(scriptdir): dir, rest = nextdir, nextrest i = path.find('/', len(dir)+1) else: break # find an explicit query string, if present. rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. 
i = rest.find('/') if i >= 0: script, rest = rest[:i], rest[i:] else: script, rest = rest, '' scriptname = dir + '/' + script scriptfile = self.translate_path(scriptname) if not os.path.exists(scriptfile): self.send_error( HTTPStatus.NOT_FOUND, "No such CGI script (%r)" % scriptname) return if not os.path.isfile(scriptfile): self.send_error( HTTPStatus.FORBIDDEN, "CGI script is not a plain file (%r)" % scriptname) return ispy = self.is_python(scriptname) if self.have_fork or not ispy: if not self.is_executable(scriptfile): self.send_error( HTTPStatus.FORBIDDEN, "CGI script is not executable (%r)" % scriptname) return # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html # XXX Much of the following could be prepared ahead of time! env = copy.deepcopy(os.environ) env['SERVER_SOFTWARE'] = self.version_string() env['SERVER_NAME'] = self.server.server_name env['GATEWAY_INTERFACE'] = 'CGI/1.1' env['SERVER_PROTOCOL'] = self.protocol_version env['SERVER_PORT'] = str(self.server.server_port) env['REQUEST_METHOD'] = self.command uqrest = urllib.parse.unquote(rest) env['PATH_INFO'] = uqrest env['PATH_TRANSLATED'] = self.translate_path(uqrest) env['SCRIPT_NAME'] = scriptname if query: env['QUERY_STRING'] = query env['REMOTE_ADDR'] = self.client_address[0] authorization = self.headers.get("authorization") if authorization: authorization = authorization.split() if len(authorization) == 2: import base64, binascii env['AUTH_TYPE'] = authorization[0] if authorization[0].lower() == "basic": try: authorization = authorization[1].encode('ascii') authorization = base64.decodebytes(authorization).\ decode('ascii') except (binascii.Error, UnicodeError): pass else: authorization = authorization.split(':') if len(authorization) == 2: env['REMOTE_USER'] = authorization[0] # XXX REMOTE_IDENT if self.headers.get('content-type') is None: env['CONTENT_TYPE'] = self.headers.get_content_type() else: env['CONTENT_TYPE'] = self.headers['content-type'] length = self.headers.get('content-length') if length: env['CONTENT_LENGTH'] = length referer = self.headers.get('referer') if referer: env['HTTP_REFERER'] = referer accept = [] for line in self.headers.getallmatchingheaders('accept'): if line[:1] in "\t\n\r ": accept.append(line.strip()) else: accept = accept + line[7:].split(',') env['HTTP_ACCEPT'] = ','.join(accept) ua = self.headers.get('user-agent') if ua: env['HTTP_USER_AGENT'] = ua co = filter(None, self.headers.get_all('cookie', [])) cookie_str = ', '.join(co) if cookie_str: env['HTTP_COOKIE'] = cookie_str # XXX Other HTTP_* headers # Since we're setting the env in the parent, provide empty # values to override previously set values for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH', 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'): env.setdefault(k, "") self.send_response(HTTPStatus.OK, "Script output follows") self.flush_headers() decoded_query = query.replace('+', ' ') if self.have_fork: # Unix -- fork as we should args = [script] if '=' not in decoded_query: args.append(decoded_query) nobody = nobody_uid() self.wfile.flush() # Always flush before forking pid = os.fork() if pid != 0: # Parent pid, sts = os.waitpid(pid, 0) # throw away additional data [see bug #427345] while select.select([self.rfile], [], [], 0)[0]: if not self.rfile.read(1): break if sts: self.log_error("CGI script exit status %#x", sts) return # Child try: try: os.setuid(nobody) except OSError: pass os.dup2(self.rfile.fileno(), 0) os.dup2(self.wfile.fileno(), 1) os.execve(scriptfile, args, env) except: 
self.server.handle_error(self.request, self.client_address) os._exit(127) else: # Non-Unix -- use subprocess cmdline = [scriptfile] if self.is_python(scriptfile): interp = sys.executable if interp.lower().endswith("w.exe"): # On Windows, use python.exe, not pythonw.exe interp = interp[:-5] + interp[-4:] cmdline = [interp, '-u'] + cmdline if '=' not in query: cmdline.append(query) self.log_message("command: %s", subprocess.list2cmdline(cmdline)) try: nbytes = int(length) except (TypeError, ValueError): nbytes = 0 p = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env = env ) if self.command.lower() == "post" and nbytes > 0: data = self.rfile.read(nbytes) else: data = None # throw away additional data [see bug #427345] while select.select([self.rfile._sock], [], [], 0)[0]: if not self.rfile._sock.recv(1): break stdout, stderr = p.communicate(data) self.wfile.write(stdout) if stderr: self.log_error('%s', stderr) p.stderr.close() p.stdout.close() status = p.returncode if status: self.log_error("CGI script exit status %#x", status) else: self.log_message("CGI script exited OK") def test(HandlerClass=BaseHTTPRequestHandler, ServerClass=HTTPServer, protocol="HTTP/1.0", port=8000, bind=""): """Test the HTTP request handler class. This runs an HTTP server on port 8000 (or the port argument). """ server_address = (bind, port) HandlerClass.protocol_version = protocol with ServerClass(server_address, HandlerClass) as httpd: sa = httpd.socket.getsockname() serve_message = "Serving HTTP on {host} port {port} (http://{host}:{port}/) ..." print(serve_message.format(host=sa[0], port=sa[1])) try: httpd.serve_forever() except KeyboardInterrupt: print("\nKeyboard interrupt received, exiting.") sys.exit(0) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cgi', action='store_true', help='Run as CGI Server') parser.add_argument('--bind', '-b', default='', metavar='ADDRESS', help='Specify alternate bind address ' '[default: all interfaces]') parser.add_argument('port', action='store', default=8000, type=int, nargs='?', help='Specify alternate port [default: 8000]') args = parser.parse_args() if args.cgi: handler_class = CGIHTTPRequestHandler else: handler_class = SimpleHTTPRequestHandler test(HandlerClass=handler_class, port=args.port, bind=args.bind) eventlet-0.30.2/eventlet/green/httplib.py0000644000076500000240000000075514006212666021034 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import socket import six to_patch = [('socket', socket)] try: from eventlet.green import ssl to_patch.append(('ssl', ssl)) except ImportError: pass if six.PY2: patcher.inject('httplib', globals(), *to_patch) if six.PY3: from eventlet.green.http import client for name in dir(client): if name not in patcher.__exclude: globals()[name] = getattr(client, name) if __name__ == '__main__': test() eventlet-0.30.2/eventlet/green/os.py0000644000076500000240000000650714006212666020010 0ustar temotostaff00000000000000os_orig = __import__("os") import errno socket = __import__("socket") from eventlet import greenio from eventlet.support import get_errno from eventlet import greenthread from eventlet import hubs from eventlet.patcher import slurp_properties __all__ = os_orig.__all__ __patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open'] slurp_properties( os_orig, globals(), ignore=__patched__, srckeys=dir(os_orig)) def fdopen(fd, *args, **kw): """fdopen(fd [, mode='r' [, bufsize]]) -> file_object Return an open 
file object connected to a file descriptor.""" if not isinstance(fd, int): raise TypeError('fd should be int, not %r' % fd) try: return greenio.GreenPipe(fd, *args, **kw) except IOError as e: raise OSError(*e.args) __original_read__ = os_orig.read def read(fd, n): """read(fd, buffersize) -> string Read a file descriptor.""" while True: try: return __original_read__(fd, n) except (OSError, IOError) as e: if get_errno(e) != errno.EAGAIN: raise except socket.error as e: if get_errno(e) == errno.EPIPE: return '' raise try: hubs.trampoline(fd, read=True) except hubs.IOClosed: return '' __original_write__ = os_orig.write def write(fd, st): """write(fd, string) -> byteswritten Write a string to a file descriptor. """ while True: try: return __original_write__(fd, st) except (OSError, IOError) as e: if get_errno(e) != errno.EAGAIN: raise except socket.error as e: if get_errno(e) != errno.EPIPE: raise hubs.trampoline(fd, write=True) def wait(): """wait() -> (pid, status) Wait for completion of a child process.""" return waitpid(0, 0) __original_waitpid__ = os_orig.waitpid def waitpid(pid, options): """waitpid(...) waitpid(pid, options) -> (pid, status) Wait for completion of a given child process.""" if options & os_orig.WNOHANG != 0: return __original_waitpid__(pid, options) else: new_options = options | os_orig.WNOHANG while True: rpid, status = __original_waitpid__(pid, new_options) if rpid and status >= 0: return rpid, status greenthread.sleep(0.01) __original_open__ = os_orig.open def open(file, flags, mode=0o777, dir_fd=None): """ Wrap os.open This behaves identically, but collaborates with the hub's notify_opened protocol. """ # pathlib workaround #534 pathlib._NormalAccessor wraps `open` in # `staticmethod` for py < 3.7 but not 3.7. That means we get here with # `file` being a pathlib._NormalAccessor object, and the other arguments # shifted. Fortunately pathlib doesn't use the `dir_fd` argument, so we # have space in the parameter list. We use some heuristics to detect this # and adjust the parameters (without importing pathlib) if type(file).__name__ == '_NormalAccessor': file, flags, mode, dir_fd = flags, mode, dir_fd, None if dir_fd is not None: fd = __original_open__(file, flags, mode, dir_fd=dir_fd) else: fd = __original_open__(file, flags, mode) hubs.notify_opened(fd) return fd eventlet-0.30.2/eventlet/green/profile.py0000644000076500000240000002252214006212666021022 0ustar temotostaff00000000000000# Copyright (c) 2010, CCP Games # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of CCP Games nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This module is API-equivalent to the standard library :mod:`profile` module lbut it is greenthread-aware as well as thread-aware. Use this module to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`. FIXME: No testcases for this module. """ profile_orig = __import__('profile') __all__ = profile_orig.__all__ from eventlet.patcher import slurp_properties slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig)) import sys import functools from eventlet import greenthread from eventlet import patcher import six thread = patcher.original(six.moves._thread.__name__) # non-monkeypatched module needed # This class provides the start() and stop() functions class Profile(profile_orig.Profile): base = profile_orig.Profile def __init__(self, timer=None, bias=None): self.current_tasklet = greenthread.getcurrent() self.thread_id = thread.get_ident() self.base.__init__(self, timer, bias) self.sleeping = {} def __call__(self, *args): """make callable, allowing an instance to be the profiler""" self.dispatcher(*args) def _setup(self): self._has_setup = True self.cur = None self.timings = {} self.current_tasklet = greenthread.getcurrent() self.thread_id = thread.get_ident() self.simulate_call("profiler") def start(self, name="start"): if getattr(self, "running", False): return self._setup() self.simulate_call("start") self.running = True sys.setprofile(self.dispatcher) def stop(self): sys.setprofile(None) self.running = False self.TallyTimings() # special cases for the original run commands, makin sure to # clear the timer context. def runctx(self, cmd, globals, locals): if not getattr(self, "_has_setup", False): self._setup() try: return profile_orig.Profile.runctx(self, cmd, globals, locals) finally: self.TallyTimings() def runcall(self, func, *args, **kw): if not getattr(self, "_has_setup", False): self._setup() try: return profile_orig.Profile.runcall(self, func, *args, **kw) finally: self.TallyTimings() def trace_dispatch_return_extend_back(self, frame, t): """A hack function to override error checking in parent class. It allows invalid returns (where frames weren't preveiously entered into the profiler) which can happen for all the tasklets that suddenly start to get monitored. 
This means that the time will eventually be attributed to a call high in the chain, when there is a tasklet switch """ if isinstance(self.cur[-2], Profile.fake_frame): return False self.trace_dispatch_call(frame, 0) return self.trace_dispatch_return(frame, t) def trace_dispatch_c_return_extend_back(self, frame, t): # same for c return if isinstance(self.cur[-2], Profile.fake_frame): return False # ignore bogus returns self.trace_dispatch_c_call(frame, 0) return self.trace_dispatch_return(frame, t) def SwitchTasklet(self, t0, t1, t): # tally the time spent in the old tasklet pt, it, et, fn, frame, rcur = self.cur cur = (pt, it + t, et, fn, frame, rcur) # we are switching to a new tasklet, store the old self.sleeping[t0] = cur, self.timings self.current_tasklet = t1 # find the new one try: self.cur, self.timings = self.sleeping.pop(t1) except KeyError: self.cur, self.timings = None, {} self.simulate_call("profiler") self.simulate_call("new_tasklet") def TallyTimings(self): oldtimings = self.sleeping self.sleeping = {} # first, unwind the main "cur" self.cur = self.Unwind(self.cur, self.timings) # we must keep the timings dicts separate for each tasklet, since it contains # the 'ns' item, recursion count of each function in that tasklet. This is # used in the Unwind dude. for tasklet, (cur, timings) in six.iteritems(oldtimings): self.Unwind(cur, timings) for k, v in six.iteritems(timings): if k not in self.timings: self.timings[k] = v else: # accumulate all to the self.timings cc, ns, tt, ct, callers = self.timings[k] # ns should be 0 after unwinding cc += v[0] tt += v[2] ct += v[3] for k1, v1 in six.iteritems(v[4]): callers[k1] = callers.get(k1, 0) + v1 self.timings[k] = cc, ns, tt, ct, callers def Unwind(self, cur, timings): "A function to unwind a 'cur' frame and tally the results" "see profile.trace_dispatch_return() for details" # also see simulate_cmd_complete() while(cur[-1]): rpt, rit, ret, rfn, frame, rcur = cur frame_total = rit + ret if rfn in timings: cc, ns, tt, ct, callers = timings[rfn] else: cc, ns, tt, ct, callers = 0, 0, 0, 0, {} if not ns: ct = ct + frame_total cc = cc + 1 if rcur: ppt, pit, pet, pfn, pframe, pcur = rcur else: pfn = None if pfn in callers: callers[pfn] = callers[pfn] + 1 # hack: gather more elif pfn: callers[pfn] = 1 timings[rfn] = cc, ns - 1, tt + rit, ct, callers ppt, pit, pet, pfn, pframe, pcur = rcur rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur cur = rcur return cur def ContextWrap(f): @functools.wraps(f) def ContextWrapper(self, arg, t): current = greenthread.getcurrent() if current != self.current_tasklet: self.SwitchTasklet(self.current_tasklet, current, t) t = 0.0 # the time was billed to the previous tasklet return f(self, arg, t) return ContextWrapper # Add "return safety" to the dispatchers Profile.dispatch = dict(profile_orig.Profile.dispatch, **{ 'return': Profile.trace_dispatch_return_extend_back, 'c_return': Profile.trace_dispatch_c_return_extend_back, }) # Add automatic tasklet detection to the callbacks. Profile.dispatch = dict((k, ContextWrap(v)) for k, v in six.viewitems(Profile.dispatch)) # run statements shamelessly stolen from profile.py def run(statement, filename=None, sort=-1): """Run statement under profiler optionally saving results in filename This function takes a single argument that can be passed to the "exec" statement, and an optional file name. In all cases this routine attempts to "exec" its first argument and gather profiling statistics from the execution. 
If no file name is present, then this function automatically prints a simple profiling report, sorted by the standard name string (file/line/function-name) that is presented in each line. """ prof = Profile() try: prof = prof.run(statement) except SystemExit: pass if filename is not None: prof.dump_stats(filename) else: return prof.print_stats(sort) def runctx(statement, globals, locals, filename=None): """Run statement under profiler, supplying your own globals and locals, optionally saving results in filename. statement and filename have the same semantics as profile.run """ prof = Profile() try: prof = prof.runctx(statement, globals, locals) except SystemExit: pass if filename is not None: prof.dump_stats(filename) else: return prof.print_stats() eventlet-0.30.2/eventlet/green/select.py0000644000076500000240000000532114006212666020637 0ustar temotostaff00000000000000import eventlet from eventlet.hubs import get_hub import six __select = eventlet.patcher.original('select') error = __select.error __patched__ = ['select'] __deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent'] def get_fileno(obj): # The purpose of this function is to exactly replicate # the behavior of the select module when confronted with # abnormal filenos; the details are extensively tested in # the stdlib test/test_select.py. try: f = obj.fileno except AttributeError: if not isinstance(obj, six.integer_types): raise TypeError("Expected int or long, got %s" % type(obj)) return obj else: rv = f() if not isinstance(rv, six.integer_types): raise TypeError("Expected int or long, got %s" % type(rv)) return rv def select(read_list, write_list, error_list, timeout=None): # error checking like this is required by the stdlib unit tests if timeout is not None: try: timeout = float(timeout) except ValueError: raise TypeError("Expected number for timeout") hub = get_hub() timers = [] current = eventlet.getcurrent() assert hub.greenlet is not current, 'do not call blocking functions from the mainloop' ds = {} for r in read_list: ds[get_fileno(r)] = {'read': r} for w in write_list: ds.setdefault(get_fileno(w), {})['write'] = w for e in error_list: ds.setdefault(get_fileno(e), {})['error'] = e listeners = [] def on_read(d): original = ds[get_fileno(d)]['read'] current.switch(([original], [], [])) def on_write(d): original = ds[get_fileno(d)]['write'] current.switch(([], [original], [])) def on_timeout2(): current.switch(([], [], [])) def on_timeout(): # ensure that BaseHub.run() has a chance to call self.wait() # at least once before timed out. otherwise the following code # can time out erroneously. 
# # s1, s2 = socket.socketpair() # print(select.select([], [s1], [], 0)) timers.append(hub.schedule_call_global(0, on_timeout2)) if timeout is not None: timers.append(hub.schedule_call_global(timeout, on_timeout)) try: for k, v in six.iteritems(ds): if v.get('read'): listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None)) if v.get('write'): listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None)) try: return hub.switch() finally: for l in listeners: hub.remove(l) finally: for t in timers: t.cancel() eventlet-0.30.2/eventlet/green/selectors.py0000644000076500000240000000166414006212666021371 0ustar temotostaff00000000000000import sys from eventlet import patcher from eventlet.green import select __patched__ = [ 'DefaultSelector', 'SelectSelector', ] # We only have green select so the options are: # * leave it be and have selectors that block # * try to pretend the "bad" selectors don't exist # * replace all with SelectSelector for the price of possibly different # performance characteristic and missing fileno() method (if someone # uses it it'll result in a crash, we may want to implement it in the future) # # This module used to follow the third approach but just removing the offending # selectors is less error prone and less confusing approach. __deleted__ = [ 'PollSelector', 'EpollSelector', 'DevpollSelector', 'KqueueSelector', ] patcher.inject('selectors', globals(), ('select', select)) del patcher if sys.platform != 'win32': SelectSelector._select = staticmethod(select.select) DefaultSelector = SelectSelector eventlet-0.30.2/eventlet/green/socket.py0000644000076500000240000000357614006212666020662 0ustar temotostaff00000000000000import os import sys __import__('eventlet.green._socket_nodns') __socket = sys.modules['eventlet.green._socket_nodns'] __all__ = __socket.__all__ __patched__ = __socket.__patched__ + [ 'create_connection', 'getaddrinfo', 'gethostbyname', 'gethostbyname_ex', 'getnameinfo', ] from eventlet.patcher import slurp_properties slurp_properties(__socket, globals(), srckeys=dir(__socket)) if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes': from eventlet.support import greendns gethostbyname = greendns.gethostbyname getaddrinfo = greendns.getaddrinfo gethostbyname_ex = greendns.gethostbyname_ex getnameinfo = greendns.getnameinfo del greendns def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. 
""" err = "getaddrinfo returns an empty list" host, port = address for res in getaddrinfo(host, port, 0, SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket(af, socktype, proto) if timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except error as e: err = e if sock is not None: sock.close() if not isinstance(err, error): err = error(err) raise err eventlet-0.30.2/eventlet/green/ssl.py0000644000076500000240000004625214006212666020171 0ustar temotostaff00000000000000__ssl = __import__('ssl') from eventlet.patcher import slurp_properties slurp_properties(__ssl, globals(), srckeys=dir(__ssl)) import sys from eventlet import greenio, hubs from eventlet.greenio import ( set_nonblocking, GreenSocket, CONNECT_ERR, CONNECT_SUCCESS, ) from eventlet.hubs import trampoline, IOClosed from eventlet.support import get_errno, PY33 import six from contextlib import contextmanager orig_socket = __import__('socket') socket = orig_socket.socket timeout_exc = SSLError __patched__ = [ 'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple', 'create_default_context', '_create_default_https_context'] _original_sslsocket = __ssl.SSLSocket _original_wrap_socket = __ssl.wrap_socket _original_sslcontext = getattr(__ssl, 'SSLContext', None) _is_under_py_3_7 = sys.version_info < (3, 7) @contextmanager def _original_ssl_context(*args, **kwargs): tmp_sslcontext = _original_wrap_socket.__globals__.get('SSLContext', None) tmp_sslsocket = _original_sslsocket._create.__globals__.get('SSLSocket', None) _original_sslsocket._create.__globals__['SSLSocket'] = _original_sslsocket _original_wrap_socket.__globals__['SSLContext'] = _original_sslcontext try: yield finally: _original_wrap_socket.__globals__['SSLContext'] = tmp_sslcontext _original_sslsocket._create.__globals__['SSLSocket'] = tmp_sslsocket class GreenSSLSocket(_original_sslsocket): """ This is a green version of the SSLSocket class from the ssl module added in 2.6. For documentation on it, please see the Python standard documentation. Python nonblocking ssl objects don't give errors when the other end of the socket is closed (they do notice when the other end is shutdown, though). Any write/read operations will simply hang if the socket is closed from the other end. There is no obvious fix for this problem; it appears to be a limitation of Python's ssl object implementation. A workaround is to set a reasonable timeout on the socket using settimeout(), and to close/reopen the connection when a timeout occurs at an unexpected juncture in the code. 
""" def __new__(cls, sock=None, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, ssl_version=PROTOCOL_SSLv23, ca_certs=None, do_handshake_on_connect=True, *args, **kw): if _is_under_py_3_7: return super(GreenSSLSocket, cls).__new__(cls) else: if not isinstance(sock, GreenSocket): sock = GreenSocket(sock) with _original_ssl_context(): context = kw.get('_context') if context: ret = _original_sslsocket._create( sock=sock.fd, server_side=server_side, do_handshake_on_connect=False, suppress_ragged_eofs=kw.get('suppress_ragged_eofs'), server_hostname=kw.get('server_hostname'), context=context, session=kw.get('session'), ) else: ret = _original_wrap_socket( sock=sock.fd, keyfile=keyfile, certfile=certfile, server_side=server_side, cert_reqs=cert_reqs, ssl_version=ssl_version, ca_certs=ca_certs, do_handshake_on_connect=False, ) ret.keyfile = keyfile ret.certfile = certfile ret.cert_reqs = cert_reqs ret.ssl_version = ssl_version ret.ca_certs = ca_certs ret.__class__ = GreenSSLSocket return ret # we are inheriting from SSLSocket because its constructor calls # do_handshake whose behavior we wish to override def __init__(self, sock, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, ssl_version=PROTOCOL_SSLv23, ca_certs=None, do_handshake_on_connect=True, *args, **kw): if not isinstance(sock, GreenSocket): sock = GreenSocket(sock) self.act_non_blocking = sock.act_non_blocking if six.PY2: # On Python 2 SSLSocket constructor queries the timeout, it'd break without # this assignment self._timeout = sock.gettimeout() if _is_under_py_3_7: # nonblocking socket handshaking on connect got disabled so let's pretend it's disabled # even when it's on super(GreenSSLSocket, self).__init__( sock.fd, keyfile, certfile, server_side, cert_reqs, ssl_version, ca_certs, do_handshake_on_connect and six.PY2, *args, **kw) # the superclass initializer trashes the methods so we remove # the local-object versions of them and let the actual class # methods shine through # Note: This for Python 2 try: for fn in orig_socket._delegate_methods: delattr(self, fn) except AttributeError: pass if six.PY3: # Python 3 SSLSocket construction process overwrites the timeout so restore it self._timeout = sock.gettimeout() # it also sets timeout to None internally apparently (tested with 3.4.2) _original_sslsocket.settimeout(self, 0.0) assert _original_sslsocket.gettimeout(self) == 0.0 # see note above about handshaking self.do_handshake_on_connect = do_handshake_on_connect if do_handshake_on_connect and self._connected: self.do_handshake() def settimeout(self, timeout): self._timeout = timeout def gettimeout(self): return self._timeout def setblocking(self, flag): if flag: self.act_non_blocking = False self._timeout = None else: self.act_non_blocking = True self._timeout = 0.0 def _call_trampolining(self, func, *a, **kw): if self.act_non_blocking: return func(*a, **kw) else: while True: try: return func(*a, **kw) except SSLError as exc: if get_errno(exc) == SSL_ERROR_WANT_READ: trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) elif get_errno(exc) == SSL_ERROR_WANT_WRITE: trampoline(self, write=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) else: raise def write(self, data): """Write DATA to the underlying SSL channel. Returns number of bytes of DATA actually transmitted.""" return self._call_trampolining( super(GreenSSLSocket, self).write, data) def read(self, *args, **kwargs): """Read up to LEN bytes and return them. 
Return zero-length string on EOF.""" try: return self._call_trampolining( super(GreenSSLSocket, self).read, *args, **kwargs) except IOClosed: return b'' def send(self, data, flags=0): if self._sslobj: return self._call_trampolining( super(GreenSSLSocket, self).send, data, flags) else: trampoline(self, write=True, timeout_exc=timeout_exc('timed out')) return socket.send(self, data, flags) def sendto(self, data, addr, flags=0): # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is if self._sslobj: raise ValueError("sendto not allowed on instances of %s" % self.__class__) else: trampoline(self, write=True, timeout_exc=timeout_exc('timed out')) return socket.sendto(self, data, addr, flags) def sendall(self, data, flags=0): # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is if self._sslobj: if flags != 0: raise ValueError( "non-zero flags not allowed in calls to sendall() on %s" % self.__class__) amount = len(data) count = 0 data_to_send = data while (count < amount): v = self.send(data_to_send) count += v if v == 0: trampoline(self, write=True, timeout_exc=timeout_exc('timed out')) else: data_to_send = data[count:] return amount else: while True: try: return socket.sendall(self, data, flags) except orig_socket.error as e: if self.act_non_blocking: raise erno = get_errno(e) if erno in greenio.SOCKET_BLOCKING: trampoline(self, write=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) elif erno in greenio.SOCKET_CLOSED: return '' raise def recv(self, buflen=1024, flags=0): return self._base_recv(buflen, flags, into=False) def recv_into(self, buffer, nbytes=None, flags=0): # Copied verbatim from CPython if buffer and nbytes is None: nbytes = len(buffer) elif nbytes is None: nbytes = 1024 # end of CPython code return self._base_recv(nbytes, flags, into=True, buffer_=buffer) def _base_recv(self, nbytes, flags, into, buffer_=None): if into: plain_socket_function = socket.recv_into else: plain_socket_function = socket.recv # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is if self._sslobj: if flags != 0: raise ValueError( "non-zero flags not allowed in calls to %s() on %s" % plain_socket_function.__name__, self.__class__) if into: read = self.read(nbytes, buffer_) else: read = self.read(nbytes) return read else: while True: try: args = [self, nbytes, flags] if into: args.insert(1, buffer_) return plain_socket_function(*args) except orig_socket.error as e: if self.act_non_blocking: raise erno = get_errno(e) if erno in greenio.SOCKET_BLOCKING: try: trampoline( self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) except IOClosed: return b'' elif erno in greenio.SOCKET_CLOSED: return b'' raise def recvfrom(self, addr, buflen=1024, flags=0): if not self.act_non_blocking: trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) return super(GreenSSLSocket, self).recvfrom(addr, buflen, flags) def recvfrom_into(self, buffer, nbytes=None, flags=0): if not self.act_non_blocking: trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) return super(GreenSSLSocket, self).recvfrom_into(buffer, nbytes, flags) def unwrap(self): return GreenSocket(self._call_trampolining( super(GreenSSLSocket, self).unwrap)) def do_handshake(self): """Perform a TLS/SSL handshake.""" return self._call_trampolining( super(GreenSSLSocket, self).do_handshake) def _socket_connect(self, addr): 
real_connect = socket.connect if self.act_non_blocking: return real_connect(self, addr) else: clock = hubs.get_hub().clock # *NOTE: gross, copied code from greenio because it's not factored # well enough to reuse if self.gettimeout() is None: while True: try: return real_connect(self, addr) except orig_socket.error as exc: if get_errno(exc) in CONNECT_ERR: trampoline(self, write=True) elif get_errno(exc) in CONNECT_SUCCESS: return else: raise else: end = clock() + self.gettimeout() while True: try: real_connect(self, addr) except orig_socket.error as exc: if get_errno(exc) in CONNECT_ERR: trampoline( self, write=True, timeout=end - clock(), timeout_exc=timeout_exc('timed out')) elif get_errno(exc) in CONNECT_SUCCESS: return else: raise if clock() >= end: raise timeout_exc('timed out') def connect(self, addr): """Connects to remote ADDR, and then wraps the connection in an SSL channel.""" # *NOTE: grrrrr copied this code from ssl.py because of the reference # to socket.connect which we don't want to call directly if self._sslobj: raise ValueError("attempt to connect already-connected SSLSocket!") self._socket_connect(addr) server_side = False try: sslwrap = _ssl.sslwrap except AttributeError: # sslwrap was removed in 3.x and later in 2.7.9 if six.PY2: sslobj = self._context._wrap_socket(self._sock, server_side, ssl_sock=self) else: context = self.context if PY33 else self._context sslobj = context._wrap_socket(self, server_side) else: sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, self.ca_certs, *self.ciphers) try: # This is added in Python 3.5, http://bugs.python.org/issue21965 SSLObject except NameError: self._sslobj = sslobj else: if _is_under_py_3_7: self._sslobj = SSLObject(sslobj, owner=self) else: self._sslobj = sslobj if self.do_handshake_on_connect: self.do_handshake() def accept(self): """Accepts a new connection from a remote client, and returns a tuple containing that new connection wrapped with a server-side SSL channel, and the address of the remote client.""" # RDW grr duplication of code from greenio if self.act_non_blocking: newsock, addr = socket.accept(self) else: while True: try: newsock, addr = socket.accept(self) break except orig_socket.error as e: if get_errno(e) not in greenio.SOCKET_BLOCKING: raise trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) new_ssl = type(self)( newsock, server_side=True, do_handshake_on_connect=False, suppress_ragged_eofs=self.suppress_ragged_eofs, _context=self._context, ) return (new_ssl, addr) def dup(self): raise NotImplementedError("Can't dup an ssl object") SSLSocket = GreenSSLSocket def wrap_socket(sock, *a, **kw): return GreenSSLSocket(sock, *a, **kw) if hasattr(__ssl, 'sslwrap_simple'): def sslwrap_simple(sock, keyfile=None, certfile=None): """A replacement for the old socket.ssl function. Designed for compatibility with Python 2.5 and earlier. Will disappear in Python 3.0.""" ssl_sock = GreenSSLSocket(sock, keyfile=keyfile, certfile=certfile, server_side=False, cert_reqs=CERT_NONE, ssl_version=PROTOCOL_SSLv23, ca_certs=None) return ssl_sock if hasattr(__ssl, 'SSLContext'): _original_sslcontext = __ssl.SSLContext class GreenSSLContext(_original_sslcontext): __slots__ = () def wrap_socket(self, sock, *a, **kw): return GreenSSLSocket(sock, *a, _context=self, **kw) # https://github.com/eventlet/eventlet/issues/371 # Thanks to Gevent developers for sharing patch to this problem. 
if hasattr(_original_sslcontext.options, 'setter'): # In 3.6, these became properties. They want to access the # property __set__ method in the superclass, and they do so by using # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey # patch, which causes infinite recursion. # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296 @_original_sslcontext.options.setter def options(self, value): super(_original_sslcontext, _original_sslcontext).options.__set__(self, value) @_original_sslcontext.verify_flags.setter def verify_flags(self, value): super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value) @_original_sslcontext.verify_mode.setter def verify_mode(self, value): super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value) SSLContext = GreenSSLContext if hasattr(__ssl, 'create_default_context'): _original_create_default_context = __ssl.create_default_context def green_create_default_context(*a, **kw): # We can't just monkey-patch on the green version of `wrap_socket` # on to SSLContext instances, but SSLContext.create_default_context # does a bunch of work. Rather than re-implementing it all, just # switch out the __class__ to get our `wrap_socket` implementation context = _original_create_default_context(*a, **kw) context.__class__ = GreenSSLContext return context create_default_context = green_create_default_context _create_default_https_context = green_create_default_context eventlet-0.30.2/eventlet/green/subprocess.py0000644000076500000240000001331514006212666021552 0ustar temotostaff00000000000000import errno import sys from types import FunctionType import eventlet from eventlet import greenio from eventlet import patcher from eventlet.green import select, threading, time import six __patched__ = ['call', 'check_call', 'Popen'] to_patch = [('select', select), ('threading', threading), ('time', time)] if sys.version_info > (3, 4): from eventlet.green import selectors to_patch.append(('selectors', selectors)) patcher.inject('subprocess', globals(), *to_patch) subprocess_orig = patcher.original("subprocess") subprocess_imported = sys.modules.get('subprocess', subprocess_orig) mswindows = sys.platform == "win32" if getattr(subprocess_orig, 'TimeoutExpired', None) is None: # Backported from Python 3.3. # https://bitbucket.org/eventlet/eventlet/issue/89 class TimeoutExpired(Exception): """This exception is raised when the timeout expires while waiting for a child process. """ def __init__(self, cmd, timeout, output=None): self.cmd = cmd self.timeout = timeout self.output = output def __str__(self): return ("Command '%s' timed out after %s seconds" % (self.cmd, self.timeout)) else: TimeoutExpired = subprocess_imported.TimeoutExpired # This is the meat of this module, the green version of Popen. class Popen(subprocess_orig.Popen): """eventlet-friendly version of subprocess.Popen""" # We do not believe that Windows pipes support non-blocking I/O. At least, # the Python file objects stored on our base-class object have no # setblocking() method, and the Python fcntl module doesn't exist on # Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of # this __init__() override is to wrap the pipes for eventlet-friendly # non-blocking I/O, don't even bother overriding it on Windows. 
if not mswindows: def __init__(self, args, bufsize=0, *argss, **kwds): self.args = args # Forward the call to base-class constructor subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds) # Now wrap the pipes, if any. This logic is loosely borrowed from # eventlet.processes.Process.run() method. for attr in "stdin", "stdout", "stderr": pipe = getattr(self, attr) if pipe is not None and type(pipe) != greenio.GreenPipe: # https://github.com/eventlet/eventlet/issues/243 # AttributeError: '_io.TextIOWrapper' object has no attribute 'mode' mode = getattr(pipe, 'mode', '') if not mode: if pipe.readable(): mode += 'r' if pipe.writable(): mode += 'w' # ValueError: can't have unbuffered text I/O if bufsize == 0: bufsize = -1 wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize) setattr(self, attr, wrapped_pipe) __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__ def wait(self, timeout=None, check_interval=0.01): # Instead of a blocking OS call, this version of wait() uses logic # borrowed from the eventlet 0.2 processes.Process.wait() method. if timeout is not None: endtime = time.time() + timeout try: while True: status = self.poll() if status is not None: return status if timeout is not None and time.time() > endtime: raise TimeoutExpired(self.args, timeout) eventlet.sleep(check_interval) except OSError as e: if e.errno == errno.ECHILD: # no child process, this happens if the child process # already died and has been cleaned up return -1 else: raise wait.__doc__ = subprocess_orig.Popen.wait.__doc__ if not mswindows: # don't want to rewrite the original _communicate() method, we # just want a version that uses eventlet.green.select.select() # instead of select.select(). _communicate = FunctionType( six.get_function_code(six.get_unbound_function( subprocess_orig.Popen._communicate)), globals()) try: _communicate_with_select = FunctionType( six.get_function_code(six.get_unbound_function( subprocess_orig.Popen._communicate_with_select)), globals()) _communicate_with_poll = FunctionType( six.get_function_code(six.get_unbound_function( subprocess_orig.Popen._communicate_with_poll)), globals()) except AttributeError: pass # Borrow subprocess.call() and check_call(), but patch them so they reference # OUR Popen class rather than subprocess.Popen. def patched_function(function): new_function = FunctionType(six.get_function_code(function), globals()) if six.PY3: new_function.__kwdefaults__ = function.__kwdefaults__ new_function.__defaults__ = function.__defaults__ return new_function call = patched_function(subprocess_orig.call) check_call = patched_function(subprocess_orig.check_call) # check_output is Python 2.7+ if hasattr(subprocess_orig, 'check_output'): __patched__.append('check_output') check_output = patched_function(subprocess_orig.check_output) del patched_function # Keep exceptions identity. 
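# --- Editorial usage sketch (hedged; not part of the eventlet distribution). ---
# The green Popen defined above wraps the child's pipes in GreenPipe objects and
# polls the child with eventlet.sleep(), so waiting on a process blocks only the
# calling greenthread rather than the whole hub.  A minimal sketch of what that
# buys you, assuming a POSIX `sleep` binary is available; run_child, tick and
# waiter are illustrative names only:
if __name__ == '__main__':
    import eventlet
    from eventlet.green import subprocess as green_subprocess

    def run_child():
        proc = green_subprocess.Popen(['sleep', '1'])
        return proc.wait()        # cooperative wait; yields between poll() calls

    def tick():
        for i in range(5):
            eventlet.sleep(0.2)   # keeps running while the child sleeps
            print('tick', i)

    waiter = eventlet.spawn(run_child)
    ticker = eventlet.spawn(tick)
    print('child exited with status', waiter.wait())
    ticker.wait()
# --- end of editorial sketch ---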
# https://github.com/eventlet/eventlet/issues/413 CalledProcessError = subprocess_imported.CalledProcessError del subprocess_imported eventlet-0.30.2/eventlet/green/thread.py0000644000076500000240000000604714006212666020635 0ustar temotostaff00000000000000"""Implements the standard thread module, using greenthreads.""" from six.moves import _thread as __thread import six from eventlet.support import greenlets as greenlet from eventlet import greenthread from eventlet.semaphore import Semaphore as LockType import sys __patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock', 'allocate', 'exit', 'interrupt_main', 'stack_size', '_local', 'LockType', '_count'] error = __thread.error __threadcount = 0 if six.PY3: def _set_sentinel(): # TODO this is a dummy code, reimplementing this may be needed: # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203 return allocate_lock() TIMEOUT_MAX = __thread.TIMEOUT_MAX def _count(): return __threadcount def get_ident(gr=None): if gr is None: return id(greenlet.getcurrent()) else: return id(gr) def __thread_body(func, args, kwargs): global __threadcount __threadcount += 1 try: func(*args, **kwargs) finally: __threadcount -= 1 def start_new_thread(function, args=(), kwargs=None): if (sys.version_info >= (3, 4) and getattr(function, '__module__', '') == 'threading' and hasattr(function, '__self__')): # Since Python 3.4, threading.Thread uses an internal lock # automatically released when the python thread state is deleted. # With monkey patching, eventlet uses green threads without python # thread state, so the lock is not automatically released. # # Wrap _bootstrap_inner() to release explicitly the thread state lock # when the thread completes. thread = function.__self__ bootstrap_inner = thread._bootstrap_inner def wrap_bootstrap_inner(): try: bootstrap_inner() finally: # The lock can be cleared (ex: by a fork()) if thread._tstate_lock is not None: thread._tstate_lock.release() thread._bootstrap_inner = wrap_bootstrap_inner kwargs = kwargs or {} g = greenthread.spawn_n(__thread_body, function, args, kwargs) return get_ident(g) start_new = start_new_thread def allocate_lock(*a): return LockType(1) allocate = allocate_lock def exit(): raise greenlet.GreenletExit exit_thread = __thread.exit_thread def interrupt_main(): curr = greenlet.getcurrent() if curr.parent and not curr.parent.dead: curr.parent.throw(KeyboardInterrupt()) else: raise KeyboardInterrupt() if hasattr(__thread, 'stack_size'): __original_stack_size__ = __thread.stack_size def stack_size(size=None): if size is None: return __original_stack_size__() if size > __original_stack_size__(): return __original_stack_size__(size) else: pass # not going to decrease stack_size, because otherwise other greenlets in # this thread will suffer from eventlet.corolocal import local as _local eventlet-0.30.2/eventlet/green/threading.py0000644000076500000240000000744214006212666021333 0ustar temotostaff00000000000000"""Implements the standard threading module, using greenthreads.""" import eventlet from eventlet.green import thread from eventlet.green import time from eventlet.support import greenlets as greenlet import six __patched__ = ['_start_new_thread', '_allocate_lock', '_sleep', 'local', 'stack_size', 'Lock', 'currentThread', 'current_thread', '_after_fork', '_shutdown'] if six.PY2: __patched__ += ['_get_ident'] else: __patched__ += ['get_ident', '_set_sentinel'] __orig_threading = eventlet.patcher.original('threading') __threadlocal = __orig_threading.local() 
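# --- Editorial usage sketch (hedged; not part of the eventlet distribution). ---
# The green thread primitives above back the stdlib threading API with
# greenthreads once monkey patching is applied, so Thread.start()/join() and the
# patched time.sleep() cooperate through the hub.  A minimal sketch under that
# assumption; worker and results are illustrative names only:
if __name__ == '__main__':
    import eventlet
    eventlet.monkey_patch()       # installs the green thread/threading/time modules

    import threading
    import time

    results = []

    def worker(n):
        time.sleep(0.1)           # patched: yields to the hub instead of blocking
        results.append(n)

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(sorted(results))        # expected: [0, 1, 2]
# --- end of editorial sketch ---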
__patched_enumerate = None eventlet.patcher.inject( 'threading', globals(), ('thread' if six.PY2 else '_thread', thread), ('time', time)) _count = 1 class _GreenThread(object): """Wrapper for GreenThread objects to provide Thread-like attributes and methods""" def __init__(self, g): global _count self._g = g self._name = 'GreenThread-%d' % _count _count += 1 def __repr__(self): return '<_GreenThread(%s, %r)>' % (self._name, self._g) def join(self, timeout=None): return self._g.wait() def getName(self): return self._name get_name = getName def setName(self, name): self._name = str(name) set_name = setName name = property(getName, setName) ident = property(lambda self: id(self._g)) def isAlive(self): return True is_alive = isAlive daemon = property(lambda self: True) def isDaemon(self): return self.daemon is_daemon = isDaemon __threading = None def _fixup_thread(t): # Some third-party packages (lockfile) will try to patch the # threading.Thread class with a get_name attribute if it doesn't # exist. Since we might return Thread objects from the original # threading package that won't get patched, let's make sure each # individual object gets patched too our patched threading.Thread # class has been patched. This is why monkey patching can be bad... global __threading if not __threading: __threading = __import__('threading') if (hasattr(__threading.Thread, 'get_name') and not hasattr(t, 'get_name')): t.get_name = t.getName return t def current_thread(): global __patched_enumerate g = greenlet.getcurrent() if not g: # Not currently in a greenthread, fall back to standard function return _fixup_thread(__orig_threading.current_thread()) try: active = __threadlocal.active except AttributeError: active = __threadlocal.active = {} g_id = id(g) t = active.get(g_id) if t is not None: return t # FIXME: move import from function body to top # (jaketesler@github) Furthermore, I was unable to have the current_thread() return correct results from # threading.enumerate() unless the enumerate() function was a) imported at runtime using the gross __import__() call # and b) was hot-patched using patch_function(). # https://github.com/eventlet/eventlet/issues/172#issuecomment-379421165 if __patched_enumerate is None: __patched_enumerate = eventlet.patcher.patch_function(__import__('threading').enumerate) found = [th for th in __patched_enumerate() if th.ident == g_id] if found: return found[0] # Add green thread to active if we can clean it up on exit def cleanup(g): del active[g_id] try: g.link(cleanup) except AttributeError: # Not a GreenThread type, so there's no way to hook into # the green thread exiting. Fall back to the standard # function then. 
t = _fixup_thread(__orig_threading.current_thread()) else: t = active[g_id] = _GreenThread(g) return t currentThread = current_thread eventlet-0.30.2/eventlet/green/time.py0000644000076500000240000000036014006212666020314 0ustar temotostaff00000000000000__time = __import__('time') from eventlet.patcher import slurp_properties __patched__ = ['sleep'] slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time)) from eventlet.greenthread import sleep sleep # silence pyflakes eventlet-0.30.2/eventlet/green/urllib/0000755000076500000240000000000014017673044020301 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/green/urllib/__init__.py0000644000076500000240000000257114006212666022414 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import socket from eventlet.green import time from eventlet.green import httplib from eventlet.green import ftplib import six if six.PY2: to_patch = [('socket', socket), ('httplib', httplib), ('time', time), ('ftplib', ftplib)] try: from eventlet.green import ssl to_patch.append(('ssl', ssl)) except ImportError: pass patcher.inject('urllib', globals(), *to_patch) try: URLopener except NameError: patcher.inject('urllib.request', globals(), *to_patch) # patch a bunch of things that have imports inside the # function body; this is lame and hacky but I don't feel # too bad because urllib is a hacky pile of junk that no # one should be using anyhow URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib)) if hasattr(URLopener, 'open_https'): URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib)) URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib)) ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib)) ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib)) del patcher # Run test program when run as a script if __name__ == '__main__': main() eventlet-0.30.2/eventlet/green/urllib/error.py0000644000076500000240000000023514006212666022001 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green.urllib import response patcher.inject('urllib.error', globals(), ('urllib.response', response)) del patcher eventlet-0.30.2/eventlet/green/urllib/parse.py0000644000076500000240000000012314006212666021756 0ustar temotostaff00000000000000from eventlet import patcher patcher.inject('urllib.parse', globals()) del patcher eventlet-0.30.2/eventlet/green/urllib/request.py0000644000076500000240000000274014006212666022343 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import ftplib, http, os, socket, time from eventlet.green.http import client as http_client from eventlet.green.urllib import error, parse, response # TODO should we also have green email version? # import email to_patch = [ # This (http module) is needed here, otherwise test__greenness hangs # forever on Python 3 because parts of non-green http (including # http.client) leak into our patched urllib.request. There may be a nicer # way to handle this (I didn't dig too deep) but this does the job. 
Jakub ('http', http), ('http.client', http_client), ('os', os), ('socket', socket), ('time', time), ('urllib.error', error), ('urllib.parse', parse), ('urllib.response', response), ] try: from eventlet.green import ssl except ImportError: pass else: to_patch.append(('ssl', ssl)) patcher.inject('urllib.request', globals(), *to_patch) del to_patch to_patch_in_functions = [('ftplib', ftplib)] del ftplib FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions) URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions) ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions) ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions) ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions) del error del parse del response del to_patch_in_functions eventlet-0.30.2/eventlet/green/urllib/response.py0000644000076500000240000000012614006212666022505 0ustar temotostaff00000000000000from eventlet import patcher patcher.inject('urllib.response', globals()) del patcher eventlet-0.30.2/eventlet/green/urllib2.py0000644000076500000240000000075014006212666020734 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import ftplib from eventlet.green import httplib from eventlet.green import socket from eventlet.green import ssl from eventlet.green import time from eventlet.green import urllib patcher.inject( 'urllib2', globals(), ('httplib', httplib), ('socket', socket), ('ssl', ssl), ('time', time), ('urllib', urllib)) FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib)) del patcher eventlet-0.30.2/eventlet/green/zmq.py0000644000076500000240000004322214006212666020171 0ustar temotostaff00000000000000# coding: utf-8 """The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq ` to be non blocking. """ __zmq__ = __import__('zmq') import eventlet.hubs from eventlet.patcher import slurp_properties from eventlet.support import greenlets as greenlet __patched__ = ['Context', 'Socket'] slurp_properties(__zmq__, globals(), ignore=__patched__) from collections import deque try: # alias XREQ/XREP to DEALER/ROUTER if available if not hasattr(__zmq__, 'XREQ'): XREQ = DEALER if not hasattr(__zmq__, 'XREP'): XREP = ROUTER except NameError: pass class LockReleaseError(Exception): pass class _QueueLock(object): """A Lock that can be acquired by at most one thread. Any other thread calling acquire will be blocked in a queue. When release is called, the threads are awoken in the order they blocked, one at a time. 
This lock can be acquired recursively by the same thread.""" def __init__(self): self._waiters = deque() self._count = 0 self._holder = None self._hub = eventlet.hubs.get_hub() def __nonzero__(self): return bool(self._count) __bool__ = __nonzero__ def __enter__(self): self.acquire() def __exit__(self, type, value, traceback): self.release() def acquire(self): current = greenlet.getcurrent() if (self._waiters or self._count > 0) and self._holder is not current: # block until lock is free self._waiters.append(current) self._hub.switch() w = self._waiters.popleft() assert w is current, 'Waiting threads woken out of order' assert self._count == 0, 'After waking a thread, the lock must be unacquired' self._holder = current self._count += 1 def release(self): if self._count <= 0: raise LockReleaseError("Cannot release unacquired lock") self._count -= 1 if self._count == 0: self._holder = None if self._waiters: # wake next self._hub.schedule_call_global(0, self._waiters[0].switch) class _BlockedThread(object): """Is either empty, or represents a single blocked thread that blocked itself by calling the block() method. The thread can be awoken by calling wake(). Wake() can be called multiple times and all but the first call will have no effect.""" def __init__(self): self._blocked_thread = None self._wakeupper = None self._hub = eventlet.hubs.get_hub() def __nonzero__(self): return self._blocked_thread is not None __bool__ = __nonzero__ def block(self, deadline=None): if self._blocked_thread is not None: raise Exception("Cannot block more than one thread on one BlockedThread") self._blocked_thread = greenlet.getcurrent() if deadline is not None: self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake) try: self._hub.switch() finally: self._blocked_thread = None # cleanup the wakeup task if self._wakeupper is not None: # Important to cancel the wakeup task so it doesn't # spuriously wake this greenthread later on. self._wakeupper.cancel() self._wakeupper = None def wake(self): """Schedules the blocked thread to be awoken and returns True. If wake has already been called or if there is no blocked thread, then this call has no effect and returns False.""" if self._blocked_thread is not None and self._wakeupper is None: self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch) return True return False class Context(__zmq__.Context): """Subclass of :class:`zmq.Context` """ def socket(self, socket_type): """Overridden method to ensure that the green version of socket is used Behaves the same as :meth:`zmq.Context.socket`, but ensures that a :class:`Socket` with all of its send and recv methods set to be non-blocking is returned """ if self.closed: raise ZMQError(ENOTSUP) return Socket(self, socket_type) def _wraps(source_fn): """A decorator that copies the __name__ and __doc__ from the given function """ def wrapper(dest_fn): dest_fn.__name__ = source_fn.__name__ dest_fn.__doc__ = source_fn.__doc__ return dest_fn return wrapper # Implementation notes: Each socket in 0mq contains a pipe that the # background IO threads use to communicate with the socket. These # events are important because they tell the socket when it is able to # send and when it has messages waiting to be received. The read end # of the events pipe is the same FD that getsockopt(zmq.FD) returns. 
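# --- Editorial usage sketch (hedged; not part of the eventlet distribution). ---
# The green Socket defined further below retries on zmq.EAGAIN and parks the
# calling greenthread on the hub, so several greenthreads can share one 0mq
# context without blocking each other.  A minimal sketch, assuming pyzmq is
# installed; the tcp://127.0.0.1:5557 endpoint and the sender/receiver names
# are illustrative only:
if __name__ == '__main__':
    import eventlet
    from eventlet.green import zmq as green_zmq

    url = 'tcp://127.0.0.1:5557'
    ctx = green_zmq.Context()

    def receiver():
        sock = ctx.socket(green_zmq.PULL)
        sock.bind(url)
        for _ in range(3):
            print('got', sock.recv_string())   # blocks only this greenthread
        sock.close()

    def sender():
        sock = ctx.socket(green_zmq.PUSH)
        sock.connect(url)
        for i in range(3):
            sock.send_string('msg %d' % i)
        sock.close()

    rx = eventlet.spawn(receiver)
    tx = eventlet.spawn(sender)
    tx.wait()
    rx.wait()
# --- end of editorial sketch ---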
# # Events are read from the socket's event pipe only on the thread that # the 0mq context is associated with, which is the native thread the # greenthreads are running on, and the only operations that cause the # events to be read and processed are send(), recv() and # getsockopt(zmq.EVENTS). This means that after doing any of these # three operations, the ability of the socket to send or receive a # message without blocking may have changed, but after the events are # read the FD is no longer readable so the hub may not signal our # listener. # # If we understand that after calling send() a message might be ready # to be received and that after calling recv() a message might be able # to be sent, what should we do next? There are two approaches: # # 1. Always wake the other thread if there is one waiting. This # wakeup may be spurious because the socket might not actually be # ready for a send() or recv(). However, if a thread is in a # tight-loop successfully calling send() or recv() then the wakeups # are naturally batched and there's very little cost added to each # send/recv call. # # or # # 2. Call getsockopt(zmq.EVENTS) and explicitly check if the other # thread should be woken up. This avoids spurious wake-ups but may # add overhead because getsockopt will cause all events to be # processed, whereas send and recv throttle processing # events. Admittedly, all of the events will need to be processed # eventually, but it is likely faster to batch the processing. # # Which approach is better? I have no idea. # # TODO: # - Support MessageTrackers and make MessageTracker.wait green _Socket = __zmq__.Socket _Socket_recv = _Socket.recv _Socket_send = _Socket.send _Socket_send_multipart = _Socket.send_multipart _Socket_recv_multipart = _Socket.recv_multipart _Socket_send_string = _Socket.send_string _Socket_recv_string = _Socket.recv_string _Socket_send_pyobj = _Socket.send_pyobj _Socket_recv_pyobj = _Socket.recv_pyobj _Socket_send_json = _Socket.send_json _Socket_recv_json = _Socket.recv_json _Socket_getsockopt = _Socket.getsockopt class Socket(_Socket): """Green version of :class:`zmq.core.socket.Socket The following three methods are always overridden: * send * recv * getsockopt To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving is deferred to the hub (using :func:`eventlet.hubs.trampoline`) if a ``zmq.EAGAIN`` (retry) error is raised For some socket types, the following methods are also overridden: * send_multipart * recv_multipart """ def __init__(self, context, socket_type): super(Socket, self).__init__(context, socket_type) self.__dict__['_eventlet_send_event'] = _BlockedThread() self.__dict__['_eventlet_recv_event'] = _BlockedThread() self.__dict__['_eventlet_send_lock'] = _QueueLock() self.__dict__['_eventlet_recv_lock'] = _QueueLock() def event(fd): # Some events arrived at the zmq socket. This may mean # there's a message that can be read or there's space for # a message to be written. 
send_wake = self._eventlet_send_event.wake() recv_wake = self._eventlet_recv_event.wake() if not send_wake and not recv_wake: # if no waiting send or recv thread was woken up, then # force the zmq socket's events to be processed to # avoid repeated wakeups _Socket_getsockopt(self, EVENTS) hub = eventlet.hubs.get_hub() self.__dict__['_eventlet_listener'] = hub.add(hub.READ, self.getsockopt(FD), event, lambda _: None, lambda: None) self.__dict__['_eventlet_clock'] = hub.clock @_wraps(_Socket.close) def close(self, linger=None): super(Socket, self).close(linger) if self._eventlet_listener is not None: eventlet.hubs.get_hub().remove(self._eventlet_listener) self.__dict__['_eventlet_listener'] = None # wake any blocked threads self._eventlet_send_event.wake() self._eventlet_recv_event.wake() @_wraps(_Socket.getsockopt) def getsockopt(self, option): result = _Socket_getsockopt(self, option) if option == EVENTS: # Getting the events causes the zmq socket to process # events which may mean a msg can be sent or received. If # there is a greenthread blocked and waiting for events, # it will miss the edge-triggered read event, so wake it # up. if (result & POLLOUT): self._eventlet_send_event.wake() if (result & POLLIN): self._eventlet_recv_event.wake() return result @_wraps(_Socket.send) def send(self, msg, flags=0, copy=True, track=False): """A send method that's safe to use when multiple greenthreads are calling send, send_multipart, recv and recv_multipart on the same socket. """ if flags & NOBLOCK: result = _Socket_send(self, msg, flags, copy, track) # Instead of calling both wake methods, could call # self.getsockopt(EVENTS) which would trigger wakeups if # needed. self._eventlet_send_event.wake() self._eventlet_recv_event.wake() return result # TODO: pyzmq will copy the message buffer and create Message # objects under some circumstances. We could do that work here # once to avoid doing it every time the send is retried. flags |= NOBLOCK with self._eventlet_send_lock: while True: try: return _Socket_send(self, msg, flags, copy, track) except ZMQError as e: if e.errno == EAGAIN: self._eventlet_send_event.block() else: raise finally: # The call to send processes 0mq events and may # make the socket ready to recv. Wake the next # receiver. (Could check EVENTS for POLLIN here) self._eventlet_recv_event.wake() @_wraps(_Socket.send_multipart) def send_multipart(self, msg_parts, flags=0, copy=True, track=False): """A send_multipart method that's safe to use when multiple greenthreads are calling send, send_multipart, recv and recv_multipart on the same socket. """ if flags & NOBLOCK: return _Socket_send_multipart(self, msg_parts, flags, copy, track) # acquire lock here so the subsequent calls to send for the # message parts after the first don't block with self._eventlet_send_lock: return _Socket_send_multipart(self, msg_parts, flags, copy, track) @_wraps(_Socket.send_string) def send_string(self, u, flags=0, copy=True, encoding='utf-8'): """A send_string method that's safe to use when multiple greenthreads are calling send, send_string, recv and recv_string on the same socket. 
""" if flags & NOBLOCK: return _Socket_send_string(self, u, flags, copy, encoding) # acquire lock here so the subsequent calls to send for the # message parts after the first don't block with self._eventlet_send_lock: return _Socket_send_string(self, u, flags, copy, encoding) @_wraps(_Socket.send_pyobj) def send_pyobj(self, obj, flags=0, protocol=2): """A send_pyobj method that's safe to use when multiple greenthreads are calling send, send_pyobj, recv and recv_pyobj on the same socket. """ if flags & NOBLOCK: return _Socket_send_pyobj(self, obj, flags, protocol) # acquire lock here so the subsequent calls to send for the # message parts after the first don't block with self._eventlet_send_lock: return _Socket_send_pyobj(self, obj, flags, protocol) @_wraps(_Socket.send_json) def send_json(self, obj, flags=0, **kwargs): """A send_json method that's safe to use when multiple greenthreads are calling send, send_json, recv and recv_json on the same socket. """ if flags & NOBLOCK: return _Socket_send_json(self, obj, flags, **kwargs) # acquire lock here so the subsequent calls to send for the # message parts after the first don't block with self._eventlet_send_lock: return _Socket_send_json(self, obj, flags, **kwargs) @_wraps(_Socket.recv) def recv(self, flags=0, copy=True, track=False): """A recv method that's safe to use when multiple greenthreads are calling send, send_multipart, recv and recv_multipart on the same socket. """ if flags & NOBLOCK: msg = _Socket_recv(self, flags, copy, track) # Instead of calling both wake methods, could call # self.getsockopt(EVENTS) which would trigger wakeups if # needed. self._eventlet_send_event.wake() self._eventlet_recv_event.wake() return msg deadline = None if hasattr(__zmq__, 'RCVTIMEO'): sock_timeout = self.getsockopt(__zmq__.RCVTIMEO) if sock_timeout == -1: pass elif sock_timeout > 0: deadline = self._eventlet_clock() + sock_timeout / 1000.0 else: raise ValueError(sock_timeout) flags |= NOBLOCK with self._eventlet_recv_lock: while True: try: return _Socket_recv(self, flags, copy, track) except ZMQError as e: if e.errno == EAGAIN: # zmq in its wisdom decided to reuse EAGAIN for timeouts if deadline is not None and self._eventlet_clock() > deadline: e.is_timeout = True raise self._eventlet_recv_event.block(deadline=deadline) else: raise finally: # The call to recv processes 0mq events and may # make the socket ready to send. Wake the next # receiver. (Could check EVENTS for POLLOUT here) self._eventlet_send_event.wake() @_wraps(_Socket.recv_multipart) def recv_multipart(self, flags=0, copy=True, track=False): """A recv_multipart method that's safe to use when multiple greenthreads are calling send, send_multipart, recv and recv_multipart on the same socket. """ if flags & NOBLOCK: return _Socket_recv_multipart(self, flags, copy, track) # acquire lock here so the subsequent calls to recv for the # message parts after the first don't block with self._eventlet_recv_lock: return _Socket_recv_multipart(self, flags, copy, track) @_wraps(_Socket.recv_string) def recv_string(self, flags=0, encoding='utf-8'): """A recv_string method that's safe to use when multiple greenthreads are calling send, send_string, recv and recv_string on the same socket. 
""" if flags & NOBLOCK: return _Socket_recv_string(self, flags, encoding) # acquire lock here so the subsequent calls to recv for the # message parts after the first don't block with self._eventlet_recv_lock: return _Socket_recv_string(self, flags, encoding) @_wraps(_Socket.recv_json) def recv_json(self, flags=0, **kwargs): """A recv_json method that's safe to use when multiple greenthreads are calling send, send_json, recv and recv_json on the same socket. """ if flags & NOBLOCK: return _Socket_recv_json(self, flags, **kwargs) # acquire lock here so the subsequent calls to recv for the # message parts after the first don't block with self._eventlet_recv_lock: return _Socket_recv_json(self, flags, **kwargs) @_wraps(_Socket.recv_pyobj) def recv_pyobj(self, flags=0): """A recv_pyobj method that's safe to use when multiple greenthreads are calling send, send_pyobj, recv and recv_pyobj on the same socket. """ if flags & NOBLOCK: return _Socket_recv_pyobj(self, flags) # acquire lock here so the subsequent calls to recv for the # message parts after the first don't block with self._eventlet_recv_lock: return _Socket_recv_pyobj(self, flags) eventlet-0.30.2/eventlet/greenio/0000755000076500000240000000000014017673044017340 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/greenio/__init__.py0000644000076500000240000000025114006212666021444 0ustar temotostaff00000000000000import six from eventlet.greenio.base import * # noqa if six.PY2: from eventlet.greenio.py2 import * # noqa else: from eventlet.greenio.py3 import * # noqa eventlet-0.30.2/eventlet/greenio/base.py0000644000076500000240000004314014006212666020623 0ustar temotostaff00000000000000import errno import os import socket import sys import time import warnings import eventlet from eventlet.hubs import trampoline, notify_opened, IOClosed from eventlet.support import get_errno import six __all__ = [ 'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking', 'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS', 'shutdown_safe', 'SSL', 'socket_timeout', ] BUFFER_SIZE = 4096 CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK)) CONNECT_SUCCESS = set((0, errno.EISCONN)) if sys.platform[:3] == "win": CONNECT_ERR.add(errno.WSAEINVAL) # Bug 67 if six.PY2: _python2_fileobject = socket._fileobject _original_socket = eventlet.patcher.original('socket').socket socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout) def socket_connect(descriptor, address): """ Attempts to connect to the address, returns the descriptor if it succeeds, returns None if it needs to trampoline, and raises any exceptions. """ err = descriptor.connect_ex(address) if err in CONNECT_ERR: return None if err not in CONNECT_SUCCESS: raise socket.error(err, errno.errorcode[err]) return descriptor def socket_checkerr(descriptor): err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) if err not in CONNECT_SUCCESS: raise socket.error(err, errno.errorcode[err]) def socket_accept(descriptor): """ Attempts to accept() on the descriptor, returns a client,address tuple if it succeeds; returns None if it needs to trampoline, and raises any exceptions. 
""" try: return descriptor.accept() except socket.error as e: if get_errno(e) == errno.EWOULDBLOCK: return None raise if sys.platform[:3] == "win": # winsock sometimes throws ENOTCONN SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK,)) SOCKET_CLOSED = set((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN)) else: # oddly, on linux/darwin, an unconnected socket is expected to block, # so we treat ENOTCONN the same as EWOULDBLOCK SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN)) SOCKET_CLOSED = set((errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE)) def set_nonblocking(fd): """ Sets the descriptor to be nonblocking. Works on many file-like objects as well as sockets. Only sockets can be nonblocking on Windows, however. """ try: setblocking = fd.setblocking except AttributeError: # fd has no setblocking() method. It could be that this version of # Python predates socket.setblocking(). In that case, we can still set # the flag "by hand" on the underlying OS fileno using the fcntl # module. try: import fcntl except ImportError: # Whoops, Windows has no fcntl module. This might not be a socket # at all, but rather a file-like object with no setblocking() # method. In particular, on Windows, pipes don't support # non-blocking I/O and therefore don't have that method. Which # means fcntl wouldn't help even if we could load it. raise NotImplementedError("set_nonblocking() on a file object " "with no setblocking() method " "(Windows pipes don't support non-blocking I/O)") # We managed to import fcntl. fileno = fd.fileno() orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL) new_flags = orig_flags | os.O_NONBLOCK if new_flags != orig_flags: fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags) else: # socket supports setblocking() setblocking(0) try: from socket import _GLOBAL_DEFAULT_TIMEOUT except ImportError: _GLOBAL_DEFAULT_TIMEOUT = object() class GreenSocket(object): """ Green version of socket.socket class, that is intended to be 100% API-compatible. It also recognizes the keyword parameter, 'set_nonblocking=True'. Pass False to indicate that socket is already in non-blocking mode to save syscalls. """ # This placeholder is to prevent __getattr__ from creating an infinite call loop fd = None def __init__(self, family=socket.AF_INET, *args, **kwargs): should_set_nonblocking = kwargs.pop('set_nonblocking', True) if isinstance(family, six.integer_types): fd = _original_socket(family, *args, **kwargs) # Notify the hub that this is a newly-opened socket. notify_opened(fd.fileno()) else: fd = family # import timeout from other socket, if it was there try: self._timeout = fd.gettimeout() or socket.getdefaulttimeout() except AttributeError: self._timeout = socket.getdefaulttimeout() # Filter fd.fileno() != -1 so that won't call set non-blocking on # closed socket if should_set_nonblocking and fd.fileno() != -1: set_nonblocking(fd) self.fd = fd # when client calls setblocking(0) or settimeout(0) the socket must # act non-blocking self.act_non_blocking = False # Copy some attributes from underlying real socket. # This is the easiest way that i found to fix # https://bitbucket.org/eventlet/eventlet/issue/136 # Only `getsockopt` is required to fix that issue, others # are just premature optimization to save __getattr__ call. 
self.bind = fd.bind self.close = fd.close self.fileno = fd.fileno self.getsockname = fd.getsockname self.getsockopt = fd.getsockopt self.listen = fd.listen self.setsockopt = fd.setsockopt self.shutdown = fd.shutdown self._closed = False @property def _sock(self): return self if six.PY3: def _get_io_refs(self): return self.fd._io_refs def _set_io_refs(self, value): self.fd._io_refs = value _io_refs = property(_get_io_refs, _set_io_refs) # Forward unknown attributes to fd, cache the value for future use. # I do not see any simple attribute which could be changed # so caching everything in self is fine. # If we find such attributes - only attributes having __get__ might be cached. # For now - I do not want to complicate it. def __getattr__(self, name): if self.fd is None: raise AttributeError(name) attr = getattr(self.fd, name) setattr(self, name, attr) return attr def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None): """ We need to trampoline via the event hub. We catch any signal back from the hub indicating that the operation we were waiting on was associated with a filehandle that's since been invalidated. """ if self._closed: # If we did any logging, alerting to a second trampoline attempt on a closed # socket here would be useful. raise IOClosed() try: return trampoline(fd, read=read, write=write, timeout=timeout, timeout_exc=timeout_exc, mark_as_closed=self._mark_as_closed) except IOClosed: # This socket's been obsoleted. De-fang it. self._mark_as_closed() raise def accept(self): if self.act_non_blocking: res = self.fd.accept() notify_opened(res[0].fileno()) return res fd = self.fd _timeout_exc = socket_timeout('timed out') while True: res = socket_accept(fd) if res is not None: client, addr = res notify_opened(client.fileno()) set_nonblocking(client) return type(self)(client), addr self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc) def _mark_as_closed(self): """ Mark this socket as being closed """ self._closed = True def __del__(self): # This is in case self.close is not assigned yet (currently the constructor does it) close = getattr(self, 'close', None) if close is not None: close() def connect(self, address): if self.act_non_blocking: return self.fd.connect(address) fd = self.fd _timeout_exc = socket_timeout('timed out') if self.gettimeout() is None: while not socket_connect(fd, address): try: self._trampoline(fd, write=True) except IOClosed: raise socket.error(errno.EBADFD) socket_checkerr(fd) else: end = time.time() + self.gettimeout() while True: if socket_connect(fd, address): return if time.time() >= end: raise _timeout_exc timeout = end - time.time() try: self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc) except IOClosed: # ... we need some workable errno here. 
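                    # IOClosed means another greenthread invalidated this
                    # socket while we were blocked in the hub waiting for the
                    # connect to finish; EBADFD is a best-effort way of telling
                    # the caller that the descriptor is no longer usable.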
raise socket.error(errno.EBADFD) socket_checkerr(fd) def connect_ex(self, address): if self.act_non_blocking: return self.fd.connect_ex(address) fd = self.fd if self.gettimeout() is None: while not socket_connect(fd, address): try: self._trampoline(fd, write=True) socket_checkerr(fd) except socket.error as ex: return get_errno(ex) except IOClosed: return errno.EBADFD else: end = time.time() + self.gettimeout() timeout_exc = socket.timeout(errno.EAGAIN) while True: try: if socket_connect(fd, address): return 0 if time.time() >= end: raise timeout_exc self._trampoline(fd, write=True, timeout=end - time.time(), timeout_exc=timeout_exc) socket_checkerr(fd) except socket.error as ex: return get_errno(ex) except IOClosed: return errno.EBADFD def dup(self, *args, **kw): sock = self.fd.dup(*args, **kw) newsock = type(self)(sock, set_nonblocking=False) newsock.settimeout(self.gettimeout()) return newsock if six.PY3: def makefile(self, *args, **kwargs): return _original_socket.makefile(self, *args, **kwargs) else: def makefile(self, *args, **kwargs): dupped = self.dup() res = _python2_fileobject(dupped, *args, **kwargs) if hasattr(dupped, "_drop"): dupped._drop() # Making the close function of dupped None so that when garbage collector # kicks in and tries to call del, which will ultimately call close, _drop # doesn't get called on dupped twice as it has been already explicitly called in # previous line dupped.close = None return res def makeGreenFile(self, *args, **kw): warnings.warn("makeGreenFile has been deprecated, please use " "makefile instead", DeprecationWarning, stacklevel=2) return self.makefile(*args, **kw) def _read_trampoline(self): self._trampoline( self.fd, read=True, timeout=self.gettimeout(), timeout_exc=socket_timeout('timed out')) def _recv_loop(self, recv_meth, empty_val, *args): if self.act_non_blocking: return recv_meth(*args) while True: try: # recv: bufsize=0? # recv_into: buffer is empty? # This is needed because behind the scenes we use sockets in # nonblocking mode and builtin recv* methods. Attempting to read # 0 bytes from a nonblocking socket using a builtin recv* method # does not raise a timeout exception. Since we're simulating # a blocking socket here we need to produce a timeout exception # if needed, hence the call to trampoline. if not args[0]: self._read_trampoline() return recv_meth(*args) except socket.error as e: if get_errno(e) in SOCKET_BLOCKING: pass elif get_errno(e) in SOCKET_CLOSED: return empty_val else: raise try: self._read_trampoline() except IOClosed as e: # Perhaps we should return '' instead? 
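                # The underlying descriptor was invalidated (typically closed
                # by another greenthread) while we were blocked waiting for
                # data, so surface the closure rather than report a normal,
                # empty read.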
raise EOFError() def recv(self, bufsize, flags=0): return self._recv_loop(self.fd.recv, b'', bufsize, flags) def recvfrom(self, bufsize, flags=0): return self._recv_loop(self.fd.recvfrom, b'', bufsize, flags) def recv_into(self, buffer, nbytes=0, flags=0): return self._recv_loop(self.fd.recv_into, 0, buffer, nbytes, flags) def recvfrom_into(self, buffer, nbytes=0, flags=0): return self._recv_loop(self.fd.recvfrom_into, 0, buffer, nbytes, flags) def _send_loop(self, send_method, data, *args): if self.act_non_blocking: return send_method(data, *args) _timeout_exc = socket_timeout('timed out') while True: try: return send_method(data, *args) except socket.error as e: eno = get_errno(e) if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING: raise try: self._trampoline(self.fd, write=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc) except IOClosed: raise socket.error(errno.ECONNRESET, 'Connection closed by another thread') def send(self, data, flags=0): return self._send_loop(self.fd.send, data, flags) def sendto(self, data, *args): return self._send_loop(self.fd.sendto, data, *args) def sendall(self, data, flags=0): tail = self.send(data, flags) len_data = len(data) while tail < len_data: tail += self.send(data[tail:], flags) def setblocking(self, flag): if flag: self.act_non_blocking = False self._timeout = None else: self.act_non_blocking = True self._timeout = 0.0 def settimeout(self, howlong): if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT: self.setblocking(True) return try: f = howlong.__float__ except AttributeError: raise TypeError('a float is required') howlong = f() if howlong < 0.0: raise ValueError('Timeout value out of range') if howlong == 0.0: self.act_non_blocking = True self._timeout = 0.0 else: self.act_non_blocking = False self._timeout = howlong def gettimeout(self): return self._timeout def __enter__(self): return self def __exit__(self, *args): self.close() if "__pypy__" in sys.builtin_module_names: def _reuse(self): getattr(self.fd, '_sock', self.fd)._reuse() def _drop(self): getattr(self.fd, '_sock', self.fd)._drop() def _operation_on_closed_file(*args, **kwargs): raise ValueError("I/O operation on closed file") greenpipe_doc = """ GreenPipe is a cooperative replacement for file class. It will cooperate on pipes. It will block on regular file. Differences from file class: - mode is r/w property. Should re r/o - encoding property not implemented - write/writelines will not raise TypeError exception when non-string data is written it will write str(data) instead - Universal new lines are not supported and newlines property not implementeded - file argument can be descriptor, file name or file object. """ # import SSL module here so we can refer to greenio.SSL.exceptionclass try: from OpenSSL import SSL except ImportError: # pyOpenSSL not installed, define exceptions anyway for convenience class SSL(object): class WantWriteError(Exception): pass class WantReadError(Exception): pass class ZeroReturnError(Exception): pass class SysCallError(Exception): pass def shutdown_safe(sock): """Shuts down the socket. This is a convenience method for code that wants to gracefully handle regular sockets, SSL.Connection sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.7 interchangeably. Both types of ssl socket require a shutdown() before close, but they have different arity on their shutdown method. Regular sockets don't need a shutdown before close, but it doesn't hurt. 
""" try: try: # socket, ssl.SSLSocket return sock.shutdown(socket.SHUT_RDWR) except TypeError: # SSL.Connection return sock.shutdown() except socket.error as e: # we don't care if the socket is already closed; # this will often be the case in an http server context if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK): raise eventlet-0.30.2/eventlet/greenio/py2.py0000644000076500000240000001523614006212666020430 0ustar temotostaff00000000000000import errno import os from eventlet.greenio.base import ( _operation_on_closed_file, greenpipe_doc, set_nonblocking, socket, SOCKET_BLOCKING, ) from eventlet.hubs import trampoline, notify_close, notify_opened, IOClosed from eventlet.support import get_errno import six __all__ = ['_fileobject', 'GreenPipe'] _fileobject = socket._fileobject class GreenPipe(_fileobject): __doc__ = greenpipe_doc def __init__(self, f, mode='r', bufsize=-1): if not isinstance(f, six.string_types + (int, file)): raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f) if isinstance(f, six.string_types): f = open(f, mode, 0) if isinstance(f, int): fileno = f self._name = "" % fileno else: fileno = os.dup(f.fileno()) self._name = f.name if f.mode != mode: raise ValueError('file.mode %r does not match mode parameter %r' % (f.mode, mode)) self._name = f.name f.close() super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode, bufsize) set_nonblocking(self) self.softspace = 0 @property def name(self): return self._name def __repr__(self): return "<%s %s %r, mode %r at 0x%x>" % ( self.closed and 'closed' or 'open', self.__class__.__name__, self.name, self.mode, (id(self) < 0) and (sys.maxint + id(self)) or id(self)) def close(self): super(GreenPipe, self).close() for method in [ 'fileno', 'flush', 'isatty', 'next', 'read', 'readinto', 'readline', 'readlines', 'seek', 'tell', 'truncate', 'write', 'xreadlines', '__iter__', '__next__', 'writelines']: setattr(self, method, _operation_on_closed_file) def __enter__(self): return self def __exit__(self, *args): self.close() def _get_readahead_len(self): return len(self._rbuf.getvalue()) def _clear_readahead_buf(self): len = self._get_readahead_len() if len > 0: self.read(len) def tell(self): self.flush() try: return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len() except OSError as e: raise IOError(*e.args) def seek(self, offset, whence=0): self.flush() if whence == 1 and offset == 0: # tell synonym return self.tell() if whence == 1: # adjust offset by what is read ahead offset -= self._get_readahead_len() try: rv = os.lseek(self.fileno(), offset, whence) except OSError as e: raise IOError(*e.args) else: self._clear_readahead_buf() return rv if getattr(file, "truncate", None): # not all OSes implement truncate def truncate(self, size=-1): self.flush() if size == -1: size = self.tell() try: rv = os.ftruncate(self.fileno(), size) except OSError as e: raise IOError(*e.args) else: self.seek(size) # move position&clear buffer return rv def isatty(self): try: return os.isatty(self.fileno()) except OSError as e: raise IOError(*e.args) class _SocketDuckForFd(object): """Class implementing all socket method used by _fileobject in cooperative manner using low level os I/O calls. """ _refcount = 0 def __init__(self, fileno): self._fileno = fileno notify_opened(fileno) self._closed = False def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None): if self._closed: # Don't trampoline if we're already closed. 
raise IOClosed() try: return trampoline(fd, read=read, write=write, timeout=timeout, timeout_exc=timeout_exc, mark_as_closed=self._mark_as_closed) except IOClosed: # Our fileno has been obsoleted. Defang ourselves to # prevent spurious closes. self._mark_as_closed() raise def _mark_as_closed(self): current = self._closed self._closed = True return current @property def _sock(self): return self def fileno(self): return self._fileno def recv(self, buflen): while True: try: data = os.read(self._fileno, buflen) return data except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: raise IOError(*e.args) self._trampoline(self, read=True) def recv_into(self, buf, nbytes=0, flags=0): if nbytes == 0: nbytes = len(buf) data = self.recv(nbytes) buf[:nbytes] = data return len(data) def send(self, data): while True: try: return os.write(self._fileno, data) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: raise IOError(*e.args) else: trampoline(self, write=True) def sendall(self, data): len_data = len(data) os_write = os.write fileno = self._fileno try: total_sent = os_write(fileno, data) except OSError as e: if get_errno(e) != errno.EAGAIN: raise IOError(*e.args) total_sent = 0 while total_sent < len_data: self._trampoline(self, write=True) try: total_sent += os_write(fileno, data[total_sent:]) except OSError as e: if get_errno(e) != errno. EAGAIN: raise IOError(*e.args) def __del__(self): self._close() def _close(self): was_closed = self._mark_as_closed() if was_closed: return if notify_close: # If closing from __del__, notify_close may have # already been cleaned up and set to None notify_close(self._fileno) try: os.close(self._fileno) except: # os.close may fail if __init__ didn't complete # (i.e file dscriptor passed to popen was invalid pass def __repr__(self): return "%s:%d" % (self.__class__.__name__, self._fileno) def _reuse(self): self._refcount += 1 def _drop(self): self._refcount -= 1 if self._refcount == 0: self._close() eventlet-0.30.2/eventlet/greenio/py3.py0000644000076500000240000001436014006212666020426 0ustar temotostaff00000000000000import _pyio as _original_pyio import errno import os as _original_os import socket as _original_socket from io import ( BufferedRandom as _OriginalBufferedRandom, BufferedReader as _OriginalBufferedReader, BufferedWriter as _OriginalBufferedWriter, DEFAULT_BUFFER_SIZE, TextIOWrapper as _OriginalTextIOWrapper, IOBase as _OriginalIOBase, ) from types import FunctionType from eventlet.greenio.base import ( _operation_on_closed_file, greenpipe_doc, set_nonblocking, SOCKET_BLOCKING, ) from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline from eventlet.support import get_errno import six __all__ = ['_fileobject', 'GreenPipe'] # TODO get rid of this, it only seems like the original _fileobject _fileobject = _original_socket.SocketIO # Large part of the following code is copied from the original # eventlet.greenio module class GreenFileIO(_OriginalIOBase): def __init__(self, name, mode='r', closefd=True, opener=None): if isinstance(name, int): fileno = name self._name = "" % fileno else: assert isinstance(name, six.string_types) with open(name, mode) as fd: self._name = fd.name fileno = _original_os.dup(fd.fileno()) notify_opened(fileno) self._fileno = fileno self._mode = mode self._closed = False set_nonblocking(self) self._seekable = None @property def closed(self): return self._closed def seekable(self): if self._seekable is None: try: _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR) except IOError as e: if 
get_errno(e) == errno.ESPIPE: self._seekable = False else: raise else: self._seekable = True return self._seekable def readable(self): return 'r' in self._mode or '+' in self._mode def writable(self): return 'w' in self._mode or '+' in self._mode def fileno(self): return self._fileno def read(self, size=-1): if size == -1: return self.readall() while True: try: return _original_os.read(self._fileno, size) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: raise IOError(*e.args) self._trampoline(self, read=True) def readall(self): buf = [] while True: try: chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE) if chunk == b'': return b''.join(buf) buf.append(chunk) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: raise IOError(*e.args) self._trampoline(self, read=True) def readinto(self, b): up_to = len(b) data = self.read(up_to) bytes_read = len(data) b[:bytes_read] = data return bytes_read def isatty(self): try: return _original_os.isatty(self.fileno()) except OSError as e: raise IOError(*e.args) def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None): if self._closed: # Don't trampoline if we're already closed. raise IOClosed() try: return trampoline(fd, read=read, write=write, timeout=timeout, timeout_exc=timeout_exc, mark_as_closed=self._mark_as_closed) except IOClosed: # Our fileno has been obsoleted. Defang ourselves to # prevent spurious closes. self._mark_as_closed() raise def _mark_as_closed(self): """ Mark this socket as being closed """ self._closed = True def write(self, data): view = memoryview(data) datalen = len(data) offset = 0 while offset < datalen: try: written = _original_os.write(self._fileno, view[offset:]) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: raise IOError(*e.args) trampoline(self, write=True) else: offset += written return offset def close(self): if not self._closed: self._closed = True _original_os.close(self._fileno) notify_close(self._fileno) for method in [ 'fileno', 'flush', 'isatty', 'next', 'read', 'readinto', 'readline', 'readlines', 'seek', 'tell', 'truncate', 'write', 'xreadlines', '__iter__', '__next__', 'writelines']: setattr(self, method, _operation_on_closed_file) def truncate(self, size=-1): if size == -1: size = self.tell() try: rv = _original_os.ftruncate(self._fileno, size) except OSError as e: raise IOError(*e.args) else: self.seek(size) # move position&clear buffer return rv def seek(self, offset, whence=_original_os.SEEK_SET): try: return _original_os.lseek(self._fileno, offset, whence) except OSError as e: raise IOError(*e.args) def __enter__(self): return self def __exit__(self, *args): self.close() _open_environment = dict(globals()) _open_environment.update(dict( BufferedRandom=_OriginalBufferedRandom, BufferedWriter=_OriginalBufferedWriter, BufferedReader=_OriginalBufferedReader, TextIOWrapper=_OriginalTextIOWrapper, FileIO=GreenFileIO, os=_original_os, )) _open = FunctionType( six.get_function_code(_original_pyio.open), _open_environment, ) def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None): try: fileno = name.fileno() except AttributeError: pass else: fileno = _original_os.dup(fileno) name.close() name = fileno return _open(name, mode, buffering, encoding, errors, newline, closefd, opener) GreenPipe.__doc__ = greenpipe_doc eventlet-0.30.2/eventlet/greenpool.py0000644000076500000240000002314114006212666020252 0ustar temotostaff00000000000000import traceback import eventlet from eventlet import queue 
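# Illustrative usage sketch for this module (``fetch`` and ``urls`` are
# hypothetical): a GreenPool caps how many greenthreads run at once, and a
# GreenPile gathers their results in spawn order.
#
#     pool = GreenPool(100)
#     pile = GreenPile(pool)
#     for url in urls:
#         pile.spawn(fetch, url)
#     results = list(pile)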
from eventlet.support import greenlets as greenlet import six __all__ = ['GreenPool', 'GreenPile'] DEBUG = True class GreenPool(object): """The GreenPool class is a pool of green threads. """ def __init__(self, size=1000): try: size = int(size) except ValueError as e: msg = 'GreenPool() expect size :: int, actual: {0} {1}'.format(type(size), str(e)) raise TypeError(msg) if size < 0: msg = 'GreenPool() expect size >= 0, actual: {0}'.format(repr(size)) raise ValueError(msg) self.size = size self.coroutines_running = set() self.sem = eventlet.Semaphore(size) self.no_coros_running = eventlet.Event() def resize(self, new_size): """ Change the max number of greenthreads doing work at any given time. If resize is called when there are more than *new_size* greenthreads already working on tasks, they will be allowed to complete but no new tasks will be allowed to get launched until enough greenthreads finish their tasks to drop the overall quantity below *new_size*. Until then, the return value of free() will be negative. """ size_delta = new_size - self.size self.sem.counter += size_delta self.size = new_size def running(self): """ Returns the number of greenthreads that are currently executing functions in the GreenPool.""" return len(self.coroutines_running) def free(self): """ Returns the number of greenthreads available for use. If zero or less, the next call to :meth:`spawn` or :meth:`spawn_n` will block the calling greenthread until a slot becomes available.""" return self.sem.counter def spawn(self, function, *args, **kwargs): """Run the *function* with its arguments in its own green thread. Returns the :class:`GreenThread ` object that is running the function, which can be used to retrieve the results. If the pool is currently at capacity, ``spawn`` will block until one of the running greenthreads completes its task and frees up a slot. This function is reentrant; *function* can call ``spawn`` on the same pool without risk of deadlocking the whole thing. """ # if reentering an empty pool, don't try to wait on a coroutine freeing # itself -- instead, just execute in the current coroutine current = eventlet.getcurrent() if self.sem.locked() and current in self.coroutines_running: # a bit hacky to use the GT without switching to it gt = eventlet.greenthread.GreenThread(current) gt.main(function, args, kwargs) return gt else: self.sem.acquire() gt = eventlet.spawn(function, *args, **kwargs) if not self.coroutines_running: self.no_coros_running = eventlet.Event() self.coroutines_running.add(gt) gt.link(self._spawn_done) return gt def _spawn_n_impl(self, func, args, kwargs, coro): try: try: func(*args, **kwargs) except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit): raise except: if DEBUG: traceback.print_exc() finally: if coro is None: return else: coro = eventlet.getcurrent() self._spawn_done(coro) def spawn_n(self, function, *args, **kwargs): """Create a greenthread to run the *function*, the same as :meth:`spawn`. The difference is that :meth:`spawn_n` returns None; the results of *function* are not retrievable. 
""" # if reentering an empty pool, don't try to wait on a coroutine freeing # itself -- instead, just execute in the current coroutine current = eventlet.getcurrent() if self.sem.locked() and current in self.coroutines_running: self._spawn_n_impl(function, args, kwargs, None) else: self.sem.acquire() g = eventlet.spawn_n( self._spawn_n_impl, function, args, kwargs, True) if not self.coroutines_running: self.no_coros_running = eventlet.Event() self.coroutines_running.add(g) def waitall(self): """Waits until all greenthreads in the pool are finished working.""" assert eventlet.getcurrent() not in self.coroutines_running, \ "Calling waitall() from within one of the " \ "GreenPool's greenthreads will never terminate." if self.running(): self.no_coros_running.wait() def _spawn_done(self, coro): self.sem.release() if coro is not None: self.coroutines_running.remove(coro) # if done processing (no more work is waiting for processing), # we can finish off any waitall() calls that might be pending if self.sem.balance == self.size: self.no_coros_running.send(None) def waiting(self): """Return the number of greenthreads waiting to spawn. """ if self.sem.balance < 0: return -self.sem.balance else: return 0 def _do_map(self, func, it, gi): for args in it: gi.spawn(func, *args) gi.done_spawning() def starmap(self, function, iterable): """This is the same as :func:`itertools.starmap`, except that *func* is executed in a separate green thread for each item, with the concurrency limited by the pool's size. In operation, starmap consumes a constant amount of memory, proportional to the size of the pool, and is thus suited for iterating over extremely long input lists. """ if function is None: function = lambda *a: a # We use a whole separate greenthread so its spawn() calls can block # without blocking OUR caller. On the other hand, we must assume that # our caller will immediately start trying to iterate over whatever we # return. If that were a GreenPile, our caller would always see an # empty sequence because the hub hasn't even entered _do_map() yet -- # _do_map() hasn't had a chance to spawn a single greenthread on this # GreenPool! A GreenMap is safe to use with different producer and # consumer greenthreads, because it doesn't raise StopIteration until # the producer has explicitly called done_spawning(). gi = GreenMap(self.size) eventlet.spawn_n(self._do_map, function, iterable, gi) return gi def imap(self, function, *iterables): """This is the same as :func:`itertools.imap`, and has the same concurrency and memory behavior as :meth:`starmap`. It's quite convenient for, e.g., farming out jobs from a file:: def worker(line): return do_something(line) pool = GreenPool() for result in pool.imap(worker, open("filename", 'r')): print(result) """ return self.starmap(function, six.moves.zip(*iterables)) class GreenPile(object): """GreenPile is an abstraction representing a bunch of I/O-related tasks. Construct a GreenPile with an existing GreenPool object. The GreenPile will then use that pool's concurrency as it processes its jobs. There can be many GreenPiles associated with a single GreenPool. A GreenPile can also be constructed standalone, not associated with any GreenPool. To do this, construct it with an integer size parameter instead of a GreenPool. It is not advisable to iterate over a GreenPile in a different greenthread than the one which is calling spawn. The iterator will exit early in that situation. 
""" def __init__(self, size_or_pool=1000): if isinstance(size_or_pool, GreenPool): self.pool = size_or_pool else: self.pool = GreenPool(size_or_pool) self.waiters = queue.LightQueue() self.counter = 0 def spawn(self, func, *args, **kw): """Runs *func* in its own green thread, with the result available by iterating over the GreenPile object.""" self.counter += 1 try: gt = self.pool.spawn(func, *args, **kw) self.waiters.put(gt) except: self.counter -= 1 raise def __iter__(self): return self def next(self): """Wait for the next result, suspending the current greenthread until it is available. Raises StopIteration when there are no more results.""" if self.counter == 0: raise StopIteration() return self._next() __next__ = next def _next(self): try: return self.waiters.get().wait() finally: self.counter -= 1 # this is identical to GreenPile but it blocks on spawn if the results # aren't consumed, and it doesn't generate its own StopIteration exception, # instead relying on the spawning process to send one in when it's done class GreenMap(GreenPile): def __init__(self, size_or_pool): super(GreenMap, self).__init__(size_or_pool) self.waiters = queue.LightQueue(maxsize=self.pool.size) def done_spawning(self): self.spawn(lambda: StopIteration()) def next(self): val = self._next() if isinstance(val, StopIteration): raise val else: return val __next__ = next eventlet-0.30.2/eventlet/greenthread.py0000644000076500000240000002651414006212666020557 0ustar temotostaff00000000000000from collections import deque import sys from eventlet import event from eventlet import hubs from eventlet import support from eventlet import timeout from eventlet.hubs import timer from eventlet.support import greenlets as greenlet import six import warnings __all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n', 'kill', 'spawn_after', 'spawn_after_local', 'GreenThread'] getcurrent = greenlet.getcurrent def sleep(seconds=0): """Yield control to another eligible coroutine until at least *seconds* have elapsed. *seconds* may be specified as an integer, or a float if fractional seconds are desired. Calling :func:`~greenthread.sleep` with *seconds* of 0 is the canonical way of expressing a cooperative yield. For example, if one is looping over a large list performing an expensive calculation without calling any socket methods, it's a good idea to call ``sleep(0)`` occasionally; otherwise nothing else will run. """ hub = hubs.get_hub() current = getcurrent() assert hub.greenlet is not current, 'do not call blocking functions from the mainloop' timer = hub.schedule_call_global(seconds, current.switch) try: hub.switch() finally: timer.cancel() def spawn(func, *args, **kwargs): """Create a greenthread to run ``func(*args, **kwargs)``. Returns a :class:`GreenThread` object which you can use to get the results of the call. Execution control returns immediately to the caller; the created greenthread is merely scheduled to be run at the next available opportunity. Use :func:`spawn_after` to arrange for greenthreads to be spawned after a finite delay. """ hub = hubs.get_hub() g = GreenThread(hub.greenlet) hub.schedule_call_global(0, g.switch, func, args, kwargs) return g def spawn_n(func, *args, **kwargs): """Same as :func:`spawn`, but returns a ``greenlet`` object from which it is not possible to retrieve either a return value or whether it raised any exceptions. This is faster than :func:`spawn`; it is fastest if there are no keyword arguments. 
If an exception is raised in the function, spawn_n prints a stack trace; the print can be disabled by calling :func:`eventlet.debug.hub_exceptions` with False. """ return _spawn_n(0, func, args, kwargs)[1] def spawn_after(seconds, func, *args, **kwargs): """Spawns *func* after *seconds* have elapsed. It runs as scheduled even if the current greenthread has completed. *seconds* may be specified as an integer, or a float if fractional seconds are desired. The *func* will be called with the given *args* and keyword arguments *kwargs*, and will be executed within its own greenthread. The return value of :func:`spawn_after` is a :class:`GreenThread` object, which can be used to retrieve the results of the call. To cancel the spawn and prevent *func* from being called, call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`. This will not abort the function if it's already started running, which is generally the desired behavior. If terminating *func* regardless of whether it's started or not is the desired behavior, call :meth:`GreenThread.kill`. """ hub = hubs.get_hub() g = GreenThread(hub.greenlet) hub.schedule_call_global(seconds, g.switch, func, args, kwargs) return g def spawn_after_local(seconds, func, *args, **kwargs): """Spawns *func* after *seconds* have elapsed. The function will NOT be called if the current greenthread has exited. *seconds* may be specified as an integer, or a float if fractional seconds are desired. The *func* will be called with the given *args* and keyword arguments *kwargs*, and will be executed within its own greenthread. The return value of :func:`spawn_after` is a :class:`GreenThread` object, which can be used to retrieve the results of the call. To cancel the spawn and prevent *func* from being called, call :meth:`GreenThread.cancel` on the return value. This will not abort the function if it's already started running. If terminating *func* regardless of whether it's started or not is the desired behavior, call :meth:`GreenThread.kill`. """ hub = hubs.get_hub() g = GreenThread(hub.greenlet) hub.schedule_call_local(seconds, g.switch, func, args, kwargs) return g def call_after_global(seconds, func, *args, **kwargs): warnings.warn( "call_after_global is renamed to spawn_after, which" "has the same signature and semantics (plus a bit extra). 
Please do a" " quick search-and-replace on your codebase, thanks!", DeprecationWarning, stacklevel=2) return _spawn_n(seconds, func, args, kwargs)[0] def call_after_local(seconds, function, *args, **kwargs): warnings.warn( "call_after_local is renamed to spawn_after_local, which" "has the same signature and semantics (plus a bit extra).", DeprecationWarning, stacklevel=2) hub = hubs.get_hub() g = greenlet.greenlet(function, parent=hub.greenlet) t = hub.schedule_call_local(seconds, g.switch, *args, **kwargs) return t call_after = call_after_local def exc_after(seconds, *throw_args): warnings.warn("Instead of exc_after, which is deprecated, use " "Timeout(seconds, exception)", DeprecationWarning, stacklevel=2) if seconds is None: # dummy argument, do nothing return timer.Timer(seconds, lambda: None) hub = hubs.get_hub() return hub.schedule_call_local(seconds, getcurrent().throw, *throw_args) # deprecate, remove TimeoutError, with_timeout = ( support.wrap_deprecated(old, new)(fun) for old, new, fun in ( ('greenthread.TimeoutError', 'Timeout', timeout.Timeout), ('greenthread.with_timeout', 'with_timeout', timeout.with_timeout), )) def _spawn_n(seconds, func, args, kwargs): hub = hubs.get_hub() g = greenlet.greenlet(func, parent=hub.greenlet) t = hub.schedule_call_global(seconds, g.switch, *args, **kwargs) return t, g class GreenThread(greenlet.greenlet): """The GreenThread class is a type of Greenlet which has the additional property of being able to retrieve the return value of the main function. Do not construct GreenThread objects directly; call :func:`spawn` to get one. """ def __init__(self, parent): greenlet.greenlet.__init__(self, self.main, parent) self._exit_event = event.Event() self._resolving_links = False self._exit_funcs = None def wait(self): """ Returns the result of the main function of this GreenThread. If the result is a normal return value, :meth:`wait` returns it. If it raised an exception, :meth:`wait` will raise the same exception (though the stack trace will unavoidably contain some frames from within the greenthread module).""" return self._exit_event.wait() def link(self, func, *curried_args, **curried_kwargs): """ Set up a function to be called with the results of the GreenThread. The function must have the following signature:: def func(gt, [curried args/kwargs]): When the GreenThread finishes its run, it calls *func* with itself and with the `curried arguments `_ supplied at link-time. If the function wants to retrieve the result of the GreenThread, it should call wait() on its first argument. Note that *func* is called within execution context of the GreenThread, so it is possible to interfere with other linked functions by doing things like switching explicitly to another greenthread. 
""" if self._exit_funcs is None: self._exit_funcs = deque() self._exit_funcs.append((func, curried_args, curried_kwargs)) if self._exit_event.ready(): self._resolve_links() def unlink(self, func, *curried_args, **curried_kwargs): """ remove linked function set by :meth:`link` Remove successfully return True, otherwise False """ if not self._exit_funcs: return False try: self._exit_funcs.remove((func, curried_args, curried_kwargs)) return True except ValueError: return False def main(self, function, args, kwargs): try: result = function(*args, **kwargs) except: self._exit_event.send_exception(*sys.exc_info()) self._resolve_links() raise else: self._exit_event.send(result) self._resolve_links() def _resolve_links(self): # ca and ckw are the curried function arguments if self._resolving_links: return if not self._exit_funcs: return self._resolving_links = True try: while self._exit_funcs: f, ca, ckw = self._exit_funcs.popleft() f(self, *ca, **ckw) finally: self._resolving_links = False def kill(self, *throw_args): """Kills the greenthread using :func:`kill`. After being killed all calls to :meth:`wait` will raise *throw_args* (which default to :class:`greenlet.GreenletExit`).""" return kill(self, *throw_args) def cancel(self, *throw_args): """Kills the greenthread using :func:`kill`, but only if it hasn't already started running. After being canceled, all calls to :meth:`wait` will raise *throw_args* (which default to :class:`greenlet.GreenletExit`).""" return cancel(self, *throw_args) def cancel(g, *throw_args): """Like :func:`kill`, but only terminates the greenthread if it hasn't already started execution. If the grenthread has already started execution, :func:`cancel` has no effect.""" if not g: kill(g, *throw_args) def kill(g, *throw_args): """Terminates the target greenthread by raising an exception into it. Whatever that greenthread might be doing; be it waiting for I/O or another primitive, it sees an exception right away. By default, this exception is GreenletExit, but a specific exception may be specified. *throw_args* should be the same as the arguments to raise; either an exception instance or an exc_info tuple. Calling :func:`kill` causes the calling greenthread to cooperatively yield. 
""" if g.dead: return hub = hubs.get_hub() if not g: # greenlet hasn't started yet and therefore throw won't work # on its own; semantically we want it to be as though the main # method never got called def just_raise(*a, **kw): if throw_args: six.reraise(throw_args[0], throw_args[1], throw_args[2]) else: raise greenlet.GreenletExit() g.run = just_raise if isinstance(g, GreenThread): # it's a GreenThread object, so we want to call its main # method to take advantage of the notification try: g.main(just_raise, (), {}) except: pass current = getcurrent() if current is not hub.greenlet: # arrange to wake the caller back up immediately hub.ensure_greenlet() hub.schedule_call_global(0, current.switch) g.throw(*throw_args) eventlet-0.30.2/eventlet/hubs/0000755000076500000240000000000014017673044016651 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/hubs/__init__.py0000644000076500000240000001364714006212666020772 0ustar temotostaff00000000000000import importlib import inspect import os import warnings from eventlet import patcher from eventlet.support import greenlets as greenlet import six __all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"] threading = patcher.original('threading') _threadlocal = threading.local() # order is important, get_default_hub returns first available from here builtin_hub_names = ('epolls', 'kqueue', 'poll', 'selects') builtin_hub_modules = tuple(importlib.import_module('eventlet.hubs.' + name) for name in builtin_hub_names) class HubError(Exception): pass def get_default_hub(): """Select the default hub implementation based on what multiplexing libraries are installed. The order that the hubs are tried is: * epoll * kqueue * poll * select It won't automatically select the pyevent hub, because it's not python-thread-safe. .. include:: ../doc/common.txt .. note :: |internal| """ for mod in builtin_hub_modules: if mod.is_available(): return mod raise HubError('no built-in hubs are available: {}'.format(builtin_hub_modules)) def use_hub(mod=None): """Use the module *mod*, containing a class called Hub, as the event hub. Usually not required; the default hub is usually fine. `mod` can be an actual hub class, a module, a string, or None. If `mod` is a class, use it directly. If `mod` is a module, use `module.Hub` class If `mod` is a string and contains either '.' or ':' then `use_hub` uses 'package.subpackage.module:Class' convention, otherwise imports `eventlet.hubs.mod`. If `mod` is None, `use_hub` uses the default hub. Only call use_hub during application initialization, because it resets the hub's state and any existing timers or listeners will never be resumed. These two threadlocal attributes are not part of Eventlet public API: - `threadlocal.Hub` (capital H) is hub constructor, used when no hub is currently active - `threadlocal.hub` (lowercase h) is active hub instance """ if mod is None: mod = os.environ.get('EVENTLET_HUB', None) if mod is None: mod = get_default_hub() if hasattr(_threadlocal, 'hub'): del _threadlocal.hub classname = '' if isinstance(mod, six.string_types): assert mod.strip(), "Need to specify a hub" if '.' in mod or ':' in mod: modulename, _, classname = mod.strip().partition(':') else: modulename = 'eventlet.hubs.' + mod mod = importlib.import_module(modulename) if hasattr(mod, 'is_available'): if not mod.is_available(): raise Exception('selected hub is not available on this system mod={}'.format(mod)) else: msg = '''Please provide `is_available()` function in your custom Eventlet hub {mod}. 
It must return bool: whether hub supports current platform. See eventlet/hubs/{{epoll,kqueue}} for example. '''.format(mod=mod) warnings.warn(msg, DeprecationWarning, stacklevel=3) hubclass = mod if not inspect.isclass(mod): hubclass = getattr(mod, classname or 'Hub') _threadlocal.Hub = hubclass def get_hub(): """Get the current event hub singleton object. .. note :: |internal| """ try: hub = _threadlocal.hub except AttributeError: try: _threadlocal.Hub except AttributeError: use_hub() hub = _threadlocal.hub = _threadlocal.Hub() return hub # Lame middle file import because complex dependencies in import graph from eventlet import timeout def trampoline(fd, read=None, write=None, timeout=None, timeout_exc=timeout.Timeout, mark_as_closed=None): """Suspend the current coroutine until the given socket object or file descriptor is ready to *read*, ready to *write*, or the specified *timeout* elapses, depending on arguments specified. To wait for *fd* to be ready to read, pass *read* ``=True``; ready to write, pass *write* ``=True``. To specify a timeout, pass the *timeout* argument in seconds. If the specified *timeout* elapses before the socket is ready to read or write, *timeout_exc* will be raised instead of ``trampoline()`` returning normally. .. note :: |internal| """ t = None hub = get_hub() current = greenlet.getcurrent() assert hub.greenlet is not current, 'do not call blocking functions from the mainloop' assert not ( read and write), 'not allowed to trampoline for reading and writing' try: fileno = fd.fileno() except AttributeError: fileno = fd if timeout is not None: def _timeout(exc): # This is only useful to insert debugging current.throw(exc) t = hub.schedule_call_global(timeout, _timeout, timeout_exc) try: if read: listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed) elif write: listener = hub.add(hub.WRITE, fileno, current.switch, current.throw, mark_as_closed) try: return hub.switch() finally: hub.remove(listener) finally: if t is not None: t.cancel() def notify_close(fd): """ A particular file descriptor has been explicitly closed. Register for any waiting listeners to be notified on the next run loop. """ hub = get_hub() hub.notify_close(fd) def notify_opened(fd): """ Some file descriptors may be closed 'silently' - that is, by the garbage collector, by an external library, etc. When the OS returns a file descriptor from an open call (or something similar), this may be the only indication we have that the FD has been closed and then recycled. We let the hub know that the old file descriptor is dead; any stuck listeners will be disabled and notified in turn. 
""" hub = get_hub() hub.mark_as_reopened(fd) class IOClosed(IOError): pass eventlet-0.30.2/eventlet/hubs/epolls.py0000644000076500000240000000200314006212666020511 0ustar temotostaff00000000000000import errno from eventlet import patcher, support from eventlet.hubs import hub, poll select = patcher.original('select') def is_available(): return hasattr(select, 'epoll') # NOTE: we rely on the fact that the epoll flag constants # are identical in value to the poll constants class Hub(poll.Hub): def __init__(self, clock=None): super(Hub, self).__init__(clock=clock) self.poll = select.epoll() def add(self, evtype, fileno, cb, tb, mac): oldlisteners = bool(self.listeners[self.READ].get(fileno) or self.listeners[self.WRITE].get(fileno)) # not super() to avoid double register() listener = hub.BaseHub.add(self, evtype, fileno, cb, tb, mac) try: self.register(fileno, new=not oldlisteners) except IOError as ex: # ignore EEXIST, #80 if support.get_errno(ex) != errno.EEXIST: raise return listener def do_poll(self, seconds): return self.poll.poll(seconds) eventlet-0.30.2/eventlet/hubs/hub.py0000644000076500000240000004263114006212666020004 0ustar temotostaff00000000000000import errno import heapq import math import signal import sys import traceback arm_alarm = None if hasattr(signal, 'setitimer'): def alarm_itimer(seconds): signal.setitimer(signal.ITIMER_REAL, seconds) arm_alarm = alarm_itimer else: try: import itimer arm_alarm = itimer.alarm except ImportError: def alarm_signal(seconds): signal.alarm(math.ceil(seconds)) arm_alarm = alarm_signal import eventlet.hubs from eventlet.hubs import timer from eventlet.support import greenlets as greenlet, clear_sys_exc_info try: from monotonic import monotonic except ImportError: from time import monotonic import six g_prevent_multiple_readers = True READ = "read" WRITE = "write" def closed_callback(fileno): """ Used to de-fang a callback that may be triggered by a loop in BaseHub.wait """ # No-op. pass class FdListener(object): def __init__(self, evtype, fileno, cb, tb, mark_as_closed): """ The following are required: cb - the standard callback, which will switch into the listening greenlet to indicate that the event waited upon is ready tb - a 'throwback'. This is typically greenlet.throw, used to raise a signal into the target greenlet indicating that an event was obsoleted by its underlying filehandle being repurposed. mark_as_closed - if any listener is obsoleted, this is called (in the context of some other client greenlet) to alert underlying filehandle-wrapping objects that they've been closed. 
""" assert (evtype is READ or evtype is WRITE) self.evtype = evtype self.fileno = fileno self.cb = cb self.tb = tb self.mark_as_closed = mark_as_closed self.spent = False self.greenlet = greenlet.getcurrent() def __repr__(self): return "%s(%r, %r, %r, %r)" % (type(self).__name__, self.evtype, self.fileno, self.cb, self.tb) __str__ = __repr__ def defang(self): self.cb = closed_callback if self.mark_as_closed is not None: self.mark_as_closed() self.spent = True noop = FdListener(READ, 0, lambda x: None, lambda x: None, None) # in debug mode, track the call site that created the listener class DebugListener(FdListener): def __init__(self, evtype, fileno, cb, tb, mark_as_closed): self.where_called = traceback.format_stack() self.greenlet = greenlet.getcurrent() super(DebugListener, self).__init__(evtype, fileno, cb, tb, mark_as_closed) def __repr__(self): return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % ( self.evtype, self.fileno, self.cb, self.tb, self.mark_as_closed, self.greenlet, ''.join(self.where_called)) __str__ = __repr__ def alarm_handler(signum, frame): import inspect raise RuntimeError("Blocking detector ALARMED at" + str(inspect.getframeinfo(frame))) class BaseHub(object): """ Base hub class for easing the implementation of subclasses that are specific to a particular underlying event architecture. """ SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit) READ = READ WRITE = WRITE def __init__(self, clock=None): self.listeners = {READ: {}, WRITE: {}} self.secondaries = {READ: {}, WRITE: {}} self.closed = [] if clock is None: clock = monotonic self.clock = clock self.greenlet = greenlet.greenlet(self.run) self.stopping = False self.running = False self.timers = [] self.next_timers = [] self.lclass = FdListener self.timers_canceled = 0 self.debug_exceptions = True self.debug_blocking = False self.debug_blocking_resolution = 1 def block_detect_pre(self): # shortest alarm we can possibly raise is one second tmp = signal.signal(signal.SIGALRM, alarm_handler) if tmp != alarm_handler: self._old_signal_handler = tmp arm_alarm(self.debug_blocking_resolution) def block_detect_post(self): if (hasattr(self, "_old_signal_handler") and self._old_signal_handler): signal.signal(signal.SIGALRM, self._old_signal_handler) signal.alarm(0) def add(self, evtype, fileno, cb, tb, mark_as_closed): """ Signals an intent to or write a particular file descriptor. The *evtype* argument is either the constant READ or WRITE. The *fileno* argument is the file number of the file of interest. The *cb* argument is the callback which will be called when the file is ready for reading/writing. The *tb* argument is the throwback used to signal (into the greenlet) that the file was closed. The *mark_as_closed* is used in the context of the event hub to prepare a Python object as being closed, pre-empting further close operations from accidentally shutting down the wrong OS thread. """ listener = self.lclass(evtype, fileno, cb, tb, mark_as_closed) bucket = self.listeners[evtype] if fileno in bucket: if g_prevent_multiple_readers: raise RuntimeError( "Second simultaneous %s on fileno %s " "detected. Unless you really know what you're doing, " "make sure that only one greenthread can %s any " "particular socket. Consider using a pools.Pool. 
" "If you do know what you're doing and want to disable " "this error, call " "eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; " "THAT THREAD=%s" % ( evtype, fileno, evtype, cb, bucket[fileno])) # store off the second listener in another structure self.secondaries[evtype].setdefault(fileno, []).append(listener) else: bucket[fileno] = listener return listener def _obsolete(self, fileno): """ We've received an indication that 'fileno' has been obsoleted. Any current listeners must be defanged, and notifications to their greenlets queued up to send. """ found = False for evtype, bucket in six.iteritems(self.secondaries): if fileno in bucket: for listener in bucket[fileno]: found = True self.closed.append(listener) listener.defang() del bucket[fileno] # For the primary listeners, we actually need to call remove, # which may modify the underlying OS polling objects. for evtype, bucket in six.iteritems(self.listeners): if fileno in bucket: listener = bucket[fileno] found = True self.closed.append(listener) self.remove(listener) listener.defang() return found def notify_close(self, fileno): """ We might want to do something when a fileno is closed. However, currently it suffices to obsolete listeners only when we detect an old fileno being recycled, on open. """ pass def remove(self, listener): if listener.spent: # trampoline may trigger this in its finally section. return fileno = listener.fileno evtype = listener.evtype if listener is self.listeners[evtype][fileno]: del self.listeners[evtype][fileno] # migrate a secondary listener to be the primary listener if fileno in self.secondaries[evtype]: sec = self.secondaries[evtype][fileno] if sec: self.listeners[evtype][fileno] = sec.pop(0) if not sec: del self.secondaries[evtype][fileno] else: self.secondaries[evtype][fileno].remove(listener) if not self.secondaries[evtype][fileno]: del self.secondaries[evtype][fileno] def mark_as_reopened(self, fileno): """ If a file descriptor is returned by the OS as the result of some open call (or equivalent), that signals that it might be being recycled. Catch the case where the fd was previously in use. """ self._obsolete(fileno) def remove_descriptor(self, fileno): """ Completely remove all listeners for this fileno. For internal use only.""" # gather any listeners we have listeners = [] listeners.append(self.listeners[READ].get(fileno, noop)) listeners.append(self.listeners[WRITE].get(fileno, noop)) listeners.extend(self.secondaries[READ].get(fileno, ())) listeners.extend(self.secondaries[WRITE].get(fileno, ())) for listener in listeners: try: # listener.cb may want to remove(listener) listener.cb(fileno) except Exception: self.squelch_generic_exception(sys.exc_info()) # NOW this fileno is now dead to all self.listeners[READ].pop(fileno, None) self.listeners[WRITE].pop(fileno, None) self.secondaries[READ].pop(fileno, None) self.secondaries[WRITE].pop(fileno, None) def close_one(self): """ Triggered from the main run loop. If a listener's underlying FD was closed somehow, throw an exception back to the trampoline, which should be able to manage it appropriately. """ listener = self.closed.pop() if not listener.greenlet.dead: # There's no point signalling a greenlet that's already dead. 
listener.tb(eventlet.hubs.IOClosed(errno.ENOTCONN, "Operation on closed file")) def ensure_greenlet(self): if self.greenlet.dead: # create new greenlet sharing same parent as original new = greenlet.greenlet(self.run, self.greenlet.parent) # need to assign as parent of old greenlet # for those greenlets that are currently # children of the dead hub and may subsequently # exit without further switching to hub. self.greenlet.parent = new self.greenlet = new def switch(self): cur = greenlet.getcurrent() assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP' switch_out = getattr(cur, 'switch_out', None) if switch_out is not None: try: switch_out() except: self.squelch_generic_exception(sys.exc_info()) self.ensure_greenlet() try: if self.greenlet.parent is not cur: cur.parent = self.greenlet except ValueError: pass # gets raised if there is a greenlet parent cycle clear_sys_exc_info() return self.greenlet.switch() def squelch_exception(self, fileno, exc_info): traceback.print_exception(*exc_info) sys.stderr.write("Removing descriptor: %r\n" % (fileno,)) sys.stderr.flush() try: self.remove_descriptor(fileno) except Exception as e: sys.stderr.write("Exception while removing descriptor! %r\n" % (e,)) sys.stderr.flush() def wait(self, seconds=None): raise NotImplementedError("Implement this in a subclass") def default_sleep(self): return 60.0 def sleep_until(self): t = self.timers if not t: return None return t[0][0] def run(self, *a, **kw): """Run the runloop until abort is called. """ # accept and discard variable arguments because they will be # supplied if other greenlets have run and exited before the # hub's greenlet gets a chance to run if self.running: raise RuntimeError("Already running!") try: self.running = True self.stopping = False while not self.stopping: while self.closed: # We ditch all of these first. self.close_one() self.prepare_timers() if self.debug_blocking: self.block_detect_pre() self.fire_timers(self.clock()) if self.debug_blocking: self.block_detect_post() self.prepare_timers() wakeup_when = self.sleep_until() if wakeup_when is None: sleep_time = self.default_sleep() else: sleep_time = wakeup_when - self.clock() if sleep_time > 0: self.wait(sleep_time) else: self.wait(0) else: self.timers_canceled = 0 del self.timers[:] del self.next_timers[:] finally: self.running = False self.stopping = False def abort(self, wait=False): """Stop the runloop. If run is executing, it will exit after completing the next runloop iteration. Set *wait* to True to cause abort to switch to the hub immediately and wait until it's finished processing. Waiting for the hub will only work from the main greenthread; all other greenthreads will become unreachable. """ if self.running: self.stopping = True if wait: assert self.greenlet is not greenlet.getcurrent( ), "Can't abort with wait from inside the hub's greenlet." 
# schedule an immediate timer just so the hub doesn't sleep self.schedule_call_global(0, lambda: None) # switch to it; when done the hub will switch back to its parent, # the main greenlet self.switch() def squelch_generic_exception(self, exc_info): if self.debug_exceptions: traceback.print_exception(*exc_info) sys.stderr.flush() clear_sys_exc_info() def squelch_timer_exception(self, timer, exc_info): if self.debug_exceptions: traceback.print_exception(*exc_info) sys.stderr.flush() clear_sys_exc_info() def add_timer(self, timer): scheduled_time = self.clock() + timer.seconds self.next_timers.append((scheduled_time, timer)) return scheduled_time def timer_canceled(self, timer): self.timers_canceled += 1 len_timers = len(self.timers) + len(self.next_timers) if len_timers > 1000 and len_timers / 2 <= self.timers_canceled: self.timers_canceled = 0 self.timers = [t for t in self.timers if not t[1].called] self.next_timers = [t for t in self.next_timers if not t[1].called] heapq.heapify(self.timers) def prepare_timers(self): heappush = heapq.heappush t = self.timers for item in self.next_timers: if item[1].called: self.timers_canceled -= 1 else: heappush(t, item) del self.next_timers[:] def schedule_call_local(self, seconds, cb, *args, **kw): """Schedule a callable to be called after 'seconds' seconds have elapsed. Cancel the timer if greenlet has exited. seconds: The number of seconds to wait. cb: The callable to call after the given time. *args: Arguments to pass to the callable when called. **kw: Keyword arguments to pass to the callable when called. """ t = timer.LocalTimer(seconds, cb, *args, **kw) self.add_timer(t) return t def schedule_call_global(self, seconds, cb, *args, **kw): """Schedule a callable to be called after 'seconds' seconds have elapsed. The timer will NOT be canceled if the current greenlet has exited before the timer fires. seconds: The number of seconds to wait. cb: The callable to call after the given time. *args: Arguments to pass to the callable when called. **kw: Keyword arguments to pass to the callable when called. 
""" t = timer.Timer(seconds, cb, *args, **kw) self.add_timer(t) return t def fire_timers(self, when): t = self.timers heappop = heapq.heappop while t: next = t[0] exp = next[0] timer = next[1] if when < exp: break heappop(t) try: if timer.called: self.timers_canceled -= 1 else: timer() except self.SYSTEM_EXCEPTIONS: raise except: self.squelch_timer_exception(timer, sys.exc_info()) clear_sys_exc_info() # for debugging: def get_readers(self): return self.listeners[READ].values() def get_writers(self): return self.listeners[WRITE].values() def get_timers_count(hub): return len(hub.timers) + len(hub.next_timers) def set_debug_listeners(self, value): if value: self.lclass = DebugListener else: self.lclass = FdListener def set_timer_exceptions(self, value): self.debug_exceptions = value eventlet-0.30.2/eventlet/hubs/kqueue.py0000644000076500000240000000673214006212666020527 0ustar temotostaff00000000000000import os import sys from eventlet import patcher, support from eventlet.hubs import hub import six select = patcher.original('select') time = patcher.original('time') def is_available(): return hasattr(select, 'kqueue') class Hub(hub.BaseHub): MAX_EVENTS = 100 def __init__(self, clock=None): self.FILTERS = { hub.READ: select.KQ_FILTER_READ, hub.WRITE: select.KQ_FILTER_WRITE, } super(Hub, self).__init__(clock) self._events = {} self._init_kqueue() def _init_kqueue(self): self.kqueue = select.kqueue() self._pid = os.getpid() def _reinit_kqueue(self): self.kqueue.close() self._init_kqueue() events = [e for i in six.itervalues(self._events) for e in six.itervalues(i)] self.kqueue.control(events, 0, 0) def _control(self, events, max_events, timeout): try: return self.kqueue.control(events, max_events, timeout) except (OSError, IOError): # have we forked? if os.getpid() != self._pid: self._reinit_kqueue() return self.kqueue.control(events, max_events, timeout) raise def add(self, evtype, fileno, cb, tb, mac): listener = super(Hub, self).add(evtype, fileno, cb, tb, mac) events = self._events.setdefault(fileno, {}) if evtype not in events: try: event = select.kevent(fileno, self.FILTERS.get(evtype), select.KQ_EV_ADD) self._control([event], 0, 0) events[evtype] = event except ValueError: super(Hub, self).remove(listener) raise return listener def _delete_events(self, events): del_events = [ select.kevent(e.ident, e.filter, select.KQ_EV_DELETE) for e in events ] self._control(del_events, 0, 0) def remove(self, listener): super(Hub, self).remove(listener) evtype = listener.evtype fileno = listener.fileno if not self.listeners[evtype].get(fileno): event = self._events[fileno].pop(evtype, None) if event is None: return try: self._delete_events((event,)) except OSError: pass def remove_descriptor(self, fileno): super(Hub, self).remove_descriptor(fileno) try: events = self._events.pop(fileno).values() self._delete_events(events) except KeyError: pass except OSError: pass def wait(self, seconds=None): readers = self.listeners[self.READ] writers = self.listeners[self.WRITE] if not readers and not writers: if seconds: time.sleep(seconds) return result = self._control([], self.MAX_EVENTS, seconds) SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS for event in result: fileno = event.ident evfilt = event.filter try: if evfilt == select.KQ_FILTER_READ: readers.get(fileno, hub.noop).cb(fileno) if evfilt == select.KQ_FILTER_WRITE: writers.get(fileno, hub.noop).cb(fileno) except SYSTEM_EXCEPTIONS: raise except: self.squelch_exception(fileno, sys.exc_info()) support.clear_sys_exc_info() 
eventlet-0.30.2/eventlet/hubs/poll.py0000644000076500000240000000766514006212666020204 0ustar temotostaff00000000000000import errno import sys from eventlet import patcher, support from eventlet.hubs import hub select = patcher.original('select') time = patcher.original('time') def is_available(): return hasattr(select, 'poll') class Hub(hub.BaseHub): def __init__(self, clock=None): super(Hub, self).__init__(clock) self.EXC_MASK = select.POLLERR | select.POLLHUP self.READ_MASK = select.POLLIN | select.POLLPRI self.WRITE_MASK = select.POLLOUT self.poll = select.poll() def add(self, evtype, fileno, cb, tb, mac): listener = super(Hub, self).add(evtype, fileno, cb, tb, mac) self.register(fileno, new=True) return listener def remove(self, listener): super(Hub, self).remove(listener) self.register(listener.fileno) def register(self, fileno, new=False): mask = 0 if self.listeners[self.READ].get(fileno): mask |= self.READ_MASK | self.EXC_MASK if self.listeners[self.WRITE].get(fileno): mask |= self.WRITE_MASK | self.EXC_MASK try: if mask: if new: self.poll.register(fileno, mask) else: try: self.poll.modify(fileno, mask) except (IOError, OSError): self.poll.register(fileno, mask) else: try: self.poll.unregister(fileno) except (KeyError, IOError, OSError): # raised if we try to remove a fileno that was # already removed/invalid pass except ValueError: # fileno is bad, issue 74 self.remove_descriptor(fileno) raise def remove_descriptor(self, fileno): super(Hub, self).remove_descriptor(fileno) try: self.poll.unregister(fileno) except (KeyError, ValueError, IOError, OSError): # raised if we try to remove a fileno that was # already removed/invalid pass def do_poll(self, seconds): # poll.poll expects integral milliseconds return self.poll.poll(int(seconds * 1000.0)) def wait(self, seconds=None): readers = self.listeners[self.READ] writers = self.listeners[self.WRITE] if not readers and not writers: if seconds: time.sleep(seconds) return try: presult = self.do_poll(seconds) except (IOError, select.error) as e: if support.get_errno(e) == errno.EINTR: return raise SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS if self.debug_blocking: self.block_detect_pre() # Accumulate the listeners to call back to prior to # triggering any of them. This is to keep the set # of callbacks in sync with the events we've just # polled for. It prevents one handler from invalidating # another. 
callbacks = set() noop = hub.noop # shave getattr for fileno, event in presult: if event & self.READ_MASK: callbacks.add((readers.get(fileno, noop), fileno)) if event & self.WRITE_MASK: callbacks.add((writers.get(fileno, noop), fileno)) if event & select.POLLNVAL: self.remove_descriptor(fileno) continue if event & self.EXC_MASK: callbacks.add((readers.get(fileno, noop), fileno)) callbacks.add((writers.get(fileno, noop), fileno)) for listener, fileno in callbacks: try: listener.cb(fileno) except SYSTEM_EXCEPTIONS: raise except: self.squelch_exception(fileno, sys.exc_info()) support.clear_sys_exc_info() if self.debug_blocking: self.block_detect_post() eventlet-0.30.2/eventlet/hubs/pyevent.py0000644000076500000240000001322414006212666020714 0ustar temotostaff00000000000000import sys import traceback import types import warnings from eventlet.support import greenlets as greenlet import six from eventlet.hubs.hub import BaseHub, READ, WRITE try: import event except ImportError: event = None def is_available(): return event is not None class event_wrapper(object): def __init__(self, impl=None, seconds=None): self.impl = impl self.seconds = seconds def __repr__(self): if self.impl is not None: return repr(self.impl) else: return object.__repr__(self) def __str__(self): if self.impl is not None: return str(self.impl) else: return object.__str__(self) def cancel(self): if self.impl is not None: self.impl.delete() self.impl = None @property def pending(self): return bool(self.impl and self.impl.pending()) class Hub(BaseHub): SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit) def __init__(self): super(Hub, self).__init__() event.init() self.signal_exc_info = None self.signal( 2, lambda signalnum, frame: self.greenlet.parent.throw(KeyboardInterrupt)) self.events_to_add = [] warnings.warn( "ACTION REQUIRED eventlet pyevent hub is deprecated and will be removed soon", DeprecationWarning, ) def dispatch(self): loop = event.loop while True: for e in self.events_to_add: if e is not None and e.impl is not None and e.seconds is not None: e.impl.add(e.seconds) e.seconds = None self.events_to_add = [] result = loop() if getattr(event, '__event_exc', None) is not None: # only have to do this because of bug in event.loop t = getattr(event, '__event_exc') setattr(event, '__event_exc', None) assert getattr(event, '__event_exc') is None six.reraise(t[0], t[1], t[2]) if result != 0: return result def run(self): while True: try: self.dispatch() except greenlet.GreenletExit: break except self.SYSTEM_EXCEPTIONS: raise except: if self.signal_exc_info is not None: self.schedule_call_global( 0, greenlet.getcurrent().parent.throw, *self.signal_exc_info) self.signal_exc_info = None else: self.squelch_timer_exception(None, sys.exc_info()) def abort(self, wait=True): self.schedule_call_global(0, self.greenlet.throw, greenlet.GreenletExit) if wait: assert self.greenlet is not greenlet.getcurrent( ), "Can't abort with wait from inside the hub's greenlet." 
self.switch() def _getrunning(self): return bool(self.greenlet) def _setrunning(self, value): pass # exists for compatibility with BaseHub running = property(_getrunning, _setrunning) def add(self, evtype, fileno, real_cb, real_tb, mac): # this is stupid: pyevent won't call a callback unless it's a function, # so we have to force it to be one here if isinstance(real_cb, types.BuiltinMethodType): def cb(_d): real_cb(_d) else: cb = real_cb if evtype is READ: evt = event.read(fileno, cb, fileno) elif evtype is WRITE: evt = event.write(fileno, cb, fileno) return super(Hub, self).add(evtype, fileno, evt, real_tb, mac) def signal(self, signalnum, handler): def wrapper(): try: handler(signalnum, None) except: self.signal_exc_info = sys.exc_info() event.abort() return event_wrapper(event.signal(signalnum, wrapper)) def remove(self, listener): super(Hub, self).remove(listener) listener.cb.delete() def remove_descriptor(self, fileno): for lcontainer in six.itervalues(self.listeners): listener = lcontainer.pop(fileno, None) if listener: try: listener.cb.delete() except self.SYSTEM_EXCEPTIONS: raise except: traceback.print_exc() def schedule_call_local(self, seconds, cb, *args, **kwargs): current = greenlet.getcurrent() if current is self.greenlet: return self.schedule_call_global(seconds, cb, *args, **kwargs) event_impl = event.event(_scheduled_call_local, (cb, args, kwargs, current)) wrapper = event_wrapper(event_impl, seconds=seconds) self.events_to_add.append(wrapper) return wrapper schedule_call = schedule_call_local def schedule_call_global(self, seconds, cb, *args, **kwargs): event_impl = event.event(_scheduled_call, (cb, args, kwargs)) wrapper = event_wrapper(event_impl, seconds=seconds) self.events_to_add.append(wrapper) return wrapper def _version_info(self): baseversion = event.__version__ return baseversion def _scheduled_call(event_impl, handle, evtype, arg): cb, args, kwargs = arg try: cb(*args, **kwargs) finally: event_impl.delete() def _scheduled_call_local(event_impl, handle, evtype, arg): cb, args, kwargs, caller_greenlet = arg try: if not caller_greenlet.dead: cb(*args, **kwargs) finally: event_impl.delete() eventlet-0.30.2/eventlet/hubs/selects.py0000644000076500000240000000400614006212666020662 0ustar temotostaff00000000000000import errno import sys from eventlet import patcher, support from eventlet.hubs import hub select = patcher.original('select') time = patcher.original('time') try: BAD_SOCK = set((errno.EBADF, errno.WSAENOTSOCK)) except AttributeError: BAD_SOCK = set((errno.EBADF,)) def is_available(): return hasattr(select, 'select') class Hub(hub.BaseHub): def _remove_bad_fds(self): """ Iterate through fds, removing the ones that are bad per the operating system. 
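        The probe used below is essentially a zero-timeout select on each
        descriptor by itself (sketch)::

            select.select([fd], [], [], 0)   # raises for a closed or bad fd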
""" all_fds = list(self.listeners[self.READ]) + list(self.listeners[self.WRITE]) for fd in all_fds: try: select.select([fd], [], [], 0) except select.error as e: if support.get_errno(e) in BAD_SOCK: self.remove_descriptor(fd) def wait(self, seconds=None): readers = self.listeners[self.READ] writers = self.listeners[self.WRITE] if not readers and not writers: if seconds: time.sleep(seconds) return reader_fds = list(readers) writer_fds = list(writers) all_fds = reader_fds + writer_fds try: r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds) except select.error as e: if support.get_errno(e) == errno.EINTR: return elif support.get_errno(e) in BAD_SOCK: self._remove_bad_fds() return else: raise for fileno in er: readers.get(fileno, hub.noop).cb(fileno) writers.get(fileno, hub.noop).cb(fileno) for listeners, events in ((readers, r), (writers, w)): for fileno in events: try: listeners.get(fileno, hub.noop).cb(fileno) except self.SYSTEM_EXCEPTIONS: raise except: self.squelch_exception(fileno, sys.exc_info()) support.clear_sys_exc_info() eventlet-0.30.2/eventlet/hubs/timer.py0000644000076500000240000000617314006212666020347 0ustar temotostaff00000000000000import traceback import eventlet.hubs from eventlet.support import greenlets as greenlet import six """ If true, captures a stack trace for each timer when constructed. This is useful for debugging leaking timers, to find out where the timer was set up. """ _g_debug = False class Timer(object): def __init__(self, seconds, cb, *args, **kw): """Create a timer. seconds: The minimum number of seconds to wait before calling cb: The callback to call when the timer has expired *args: The arguments to pass to cb **kw: The keyword arguments to pass to cb This timer will not be run unless it is scheduled in a runloop by calling timer.schedule() or runloop.add_timer(timer). """ self.seconds = seconds self.tpl = cb, args, kw self.called = False if _g_debug: self.traceback = six.StringIO() traceback.print_stack(file=self.traceback) @property def pending(self): return not self.called def __repr__(self): secs = getattr(self, 'seconds', None) cb, args, kw = getattr(self, 'tpl', (None, None, None)) retval = "Timer(%s, %s, *%s, **%s)" % ( secs, cb, args, kw) if _g_debug and hasattr(self, 'traceback'): retval += '\n' + self.traceback.getvalue() return retval def copy(self): cb, args, kw = self.tpl return self.__class__(self.seconds, cb, *args, **kw) def schedule(self): """Schedule this timer to run in the current runloop. """ self.called = False self.scheduled_time = eventlet.hubs.get_hub().add_timer(self) return self def __call__(self, *args): if not self.called: self.called = True cb, args, kw = self.tpl try: cb(*args, **kw) finally: try: del self.tpl except AttributeError: pass def cancel(self): """Prevent this timer from being called. If the timer has already been called or canceled, has no effect. """ if not self.called: self.called = True eventlet.hubs.get_hub().timer_canceled(self) try: del self.tpl except AttributeError: pass # No default ordering in 3.x. heapq uses < # FIXME should full set be added? 
def __lt__(self, other): return id(self) < id(other) class LocalTimer(Timer): def __init__(self, *args, **kwargs): self.greenlet = greenlet.getcurrent() Timer.__init__(self, *args, **kwargs) @property def pending(self): if self.greenlet is None or self.greenlet.dead: return False return not self.called def __call__(self, *args): if not self.called: self.called = True if self.greenlet is not None and self.greenlet.dead: return cb, args, kw = self.tpl cb(*args, **kw) def cancel(self): self.greenlet = None Timer.cancel(self) eventlet-0.30.2/eventlet/patcher.py0000644000076500000240000004521414006212666017713 0ustar temotostaff00000000000000import imp import sys try: # Only for this purpose, it's irrelevant if `os` was already patched. # https://github.com/eventlet/eventlet/pull/661 from os import register_at_fork except ImportError: register_at_fork = None import eventlet import six __all__ = ['inject', 'import_patched', 'monkey_patch', 'is_monkey_patched'] __exclude = set(('__builtins__', '__file__', '__name__')) class SysModulesSaver(object): """Class that captures some subset of the current state of sys.modules. Pass in an iterator of module names to the constructor.""" def __init__(self, module_names=()): self._saved = {} imp.acquire_lock() self.save(*module_names) def save(self, *module_names): """Saves the named modules to the object.""" for modname in module_names: self._saved[modname] = sys.modules.get(modname, None) def restore(self): """Restores the modules that the saver knows about into sys.modules. """ try: for modname, mod in six.iteritems(self._saved): if mod is not None: sys.modules[modname] = mod else: try: del sys.modules[modname] except KeyError: pass finally: imp.release_lock() def inject(module_name, new_globals, *additional_modules): """Base method for "injecting" greened modules into an imported module. It imports the module specified in *module_name*, arranging things so that the already-imported modules in *additional_modules* are used when *module_name* makes its imports. **Note:** This function does not create or change any sys.modules item, so if your greened module use code like 'sys.modules["your_module_name"]', you need to update sys.modules by yourself. *new_globals* is either None or a globals dictionary that gets populated with the contents of the *module_name* module. This is useful when creating a "green" version of some other module. *additional_modules* should be a collection of two-element tuples, of the form (, ). If it's not specified, a default selection of name/module pairs is used, which should cover all use cases but may be slower because there are inevitably redundant or unnecessary imports. 
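    A rough usage sketch; ``ftplib`` is only an example target, and any
    module that does its networking through the listed modules would work
    the same way::

        from eventlet import patcher
        from eventlet.green import socket, time

        green_globals = {}
        patcher.inject('ftplib', green_globals,
                       ('socket', socket), ('time', time))
        # green_globals now contains an ftplib that uses the green socket
        # and time modules for its imports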
""" patched_name = '__patched_module_' + module_name if patched_name in sys.modules: # returning already-patched module so as not to destroy existing # references to patched modules return sys.modules[patched_name] if not additional_modules: # supply some defaults additional_modules = ( _green_os_modules() + _green_select_modules() + _green_socket_modules() + _green_thread_modules() + _green_time_modules()) # _green_MySQLdb()) # enable this after a short baking-in period # after this we are gonna screw with sys.modules, so capture the # state of all the modules we're going to mess with, and lock saver = SysModulesSaver([name for name, m in additional_modules]) saver.save(module_name) # Cover the target modules so that when you import the module it # sees only the patched versions for name, mod in additional_modules: sys.modules[name] = mod # Remove the old module from sys.modules and reimport it while # the specified modules are in place sys.modules.pop(module_name, None) # Also remove sub modules and reimport. Use copy the keys to list # because of the pop operations will change the content of sys.modules # within th loop for imported_module_name in list(sys.modules.keys()): if imported_module_name.startswith(module_name + '.'): sys.modules.pop(imported_module_name, None) try: module = __import__(module_name, {}, {}, module_name.split('.')[:-1]) if new_globals is not None: # Update the given globals dictionary with everything from this new module for name in dir(module): if name not in __exclude: new_globals[name] = getattr(module, name) # Keep a reference to the new module to prevent it from dying sys.modules[patched_name] = module finally: saver.restore() # Put the original modules back return module def import_patched(module_name, *additional_modules, **kw_additional_modules): """Imports a module in a way that ensures that the module uses "green" versions of the standard library modules, so that everything works nonblockingly. The only required argument is the name of the module to be imported. """ return inject( module_name, None, *additional_modules + tuple(kw_additional_modules.items())) def patch_function(func, *additional_modules): """Decorator that returns a version of the function that patches some modules for the duration of the function call. This is deeply gross and should only be used for functions that import network libraries within their function bodies that there is no way of getting around.""" if not additional_modules: # supply some defaults additional_modules = ( _green_os_modules() + _green_select_modules() + _green_socket_modules() + _green_thread_modules() + _green_time_modules()) def patched(*args, **kw): saver = SysModulesSaver() for name, mod in additional_modules: saver.save(name) sys.modules[name] = mod try: return func(*args, **kw) finally: saver.restore() return patched def _original_patch_function(func, *module_names): """Kind of the contrapositive of patch_function: decorates a function such that when it's called, sys.modules is populated only with the unpatched versions of the specified modules. Unlike patch_function, only the names of the modules need be supplied, and there are no defaults. 
This is a gross hack; tell your kids not to import inside function bodies!""" def patched(*args, **kw): saver = SysModulesSaver(module_names) for name in module_names: sys.modules[name] = original(name) try: return func(*args, **kw) finally: saver.restore() return patched def original(modname): """ This returns an unpatched version of a module; this is useful for Eventlet itself (i.e. tpool).""" # note that it's not necessary to temporarily install unpatched # versions of all patchable modules during the import of the # module; this is because none of them import each other, except # for threading which imports thread original_name = '__original_module_' + modname if original_name in sys.modules: return sys.modules.get(original_name) # re-import the "pure" module and store it in the global _originals # dict; be sure to restore whatever module had that name already saver = SysModulesSaver((modname,)) sys.modules.pop(modname, None) # some rudimentary dependency checking -- fortunately the modules # we're working on don't have many dependencies so we can just do # some special-casing here if six.PY2: deps = {'threading': 'thread', 'Queue': 'threading'} if six.PY3: deps = {'threading': '_thread', 'queue': 'threading'} if modname in deps: dependency = deps[modname] saver.save(dependency) sys.modules[dependency] = original(dependency) try: real_mod = __import__(modname, {}, {}, modname.split('.')[:-1]) if modname in ('Queue', 'queue') and not hasattr(real_mod, '_threading'): # tricky hack: Queue's constructor in <2.7 imports # threading on every instantiation; therefore we wrap # it so that it always gets the original threading real_mod.Queue.__init__ = _original_patch_function( real_mod.Queue.__init__, 'threading') # save a reference to the unpatched module so it doesn't get lost sys.modules[original_name] = real_mod finally: saver.restore() return sys.modules[original_name] already_patched = {} def monkey_patch(**on): """Globally patches certain system modules to be greenthread-friendly. The keyword arguments afford some control over which modules are patched. If no keyword arguments are supplied, all possible modules are patched. If keywords are set to True, only the specified modules are patched. E.g., ``monkey_patch(socket=True, select=True)`` patches only the select and socket modules. Most arguments patch the single module of the same name (os, time, select). The exceptions are socket, which also patches the ssl module if present; and thread, which patches thread, threading, and Queue. It's safe to call monkey_patch multiple times. """ # Workaround for import cycle observed as following in monotonic # RuntimeError: no suitable implementation for this system # see https://github.com/eventlet/eventlet/issues/401#issuecomment-325015989 # # Make sure the hub is completely imported before any # monkey-patching, or we risk recursion if the process of importing # the hub calls into monkey-patched modules. 
eventlet.hubs.get_hub() accepted_args = set(('os', 'select', 'socket', 'thread', 'time', 'psycopg', 'MySQLdb', 'builtins', 'subprocess')) # To make sure only one of them is passed here assert not ('__builtin__' in on and 'builtins' in on) try: b = on.pop('__builtin__') except KeyError: pass else: on['builtins'] = b default_on = on.pop("all", None) for k in six.iterkeys(on): if k not in accepted_args: raise TypeError("monkey_patch() got an unexpected " "keyword argument %r" % k) if default_on is None: default_on = not (True in on.values()) for modname in accepted_args: if modname == 'MySQLdb': # MySQLdb is only on when explicitly patched for the moment on.setdefault(modname, False) if modname == 'builtins': on.setdefault(modname, False) on.setdefault(modname, default_on) if on['thread'] and not already_patched.get('thread'): _green_existing_locks() modules_to_patch = [] for name, modules_function in [ ('os', _green_os_modules), ('select', _green_select_modules), ('socket', _green_socket_modules), ('thread', _green_thread_modules), ('time', _green_time_modules), ('MySQLdb', _green_MySQLdb), ('builtins', _green_builtins), ('subprocess', _green_subprocess_modules), ]: if on[name] and not already_patched.get(name): modules_to_patch += modules_function() already_patched[name] = True if on['psycopg'] and not already_patched.get('psycopg'): try: from eventlet.support import psycopg2_patcher psycopg2_patcher.make_psycopg_green() already_patched['psycopg'] = True except ImportError: # note that if we get an importerror from trying to # monkeypatch psycopg, we will continually retry it # whenever monkey_patch is called; this should not be a # performance problem but it allows is_monkey_patched to # tell us whether or not we succeeded pass _threading = original('threading') imp.acquire_lock() try: for name, mod in modules_to_patch: orig_mod = sys.modules.get(name) if orig_mod is None: orig_mod = __import__(name) for attr_name in mod.__patched__: patched_attr = getattr(mod, attr_name, None) if patched_attr is not None: setattr(orig_mod, attr_name, patched_attr) deleted = getattr(mod, '__deleted__', []) for attr_name in deleted: if hasattr(orig_mod, attr_name): delattr(orig_mod, attr_name) # https://github.com/eventlet/eventlet/issues/592 if name == 'threading' and register_at_fork: def fix_threading_active( _global_dict=_threading.current_thread.__globals__, # alias orig_mod as patched to reflect its new state # https://github.com/eventlet/eventlet/pull/661#discussion_r509877481 _patched=orig_mod, ): _prefork_active = [None] def before_fork(): _prefork_active[0] = _global_dict['_active'] _global_dict['_active'] = _patched._active def after_fork(): _global_dict['_active'] = _prefork_active[0] register_at_fork( before=before_fork, after_in_parent=after_fork) fix_threading_active() finally: imp.release_lock() if sys.version_info >= (3, 3): import importlib._bootstrap thread = original('_thread') # importlib must use real thread locks, not eventlet.Semaphore importlib._bootstrap._thread = thread # Issue #185: Since Python 3.3, threading.RLock is implemented in C and # so call a C function to get the thread identifier, instead of calling # threading.get_ident(). Force the Python implementation of RLock which # calls threading.get_ident() and so is compatible with eventlet. import threading threading.RLock = threading._PyRLock # Issue #508: Since Python 3.7 queue.SimpleQueue is implemented in C, # causing a deadlock. Replace the C implementation with the Python one. 
if sys.version_info >= (3, 7): import queue queue.SimpleQueue = queue._PySimpleQueue def is_monkey_patched(module): """Returns True if the given module is monkeypatched currently, False if not. *module* can be either the module itself or its name. Based entirely off the name of the module, so if you import a module some other way than with the import keyword (including import_patched), this might not be correct about that particular module.""" return module in already_patched or \ getattr(module, '__name__', None) in already_patched def _green_existing_locks(): """Make locks created before monkey-patching safe. RLocks rely on a Lock and on Python 2, if an unpatched Lock blocks, it blocks the native thread. We need to replace these with green Locks. This was originally noticed in the stdlib logging module.""" import gc import threading import eventlet.green.thread lock_type = type(threading.Lock()) rlock_type = type(threading.RLock()) if hasattr(threading, '_PyRLock'): # this happens on CPython3 and PyPy >= 7.0.0: "py3-style" rlocks, they # are implemented natively in C and RPython respectively py3_style = True pyrlock_type = type(threading._PyRLock()) else: # this happens on CPython2.7 and PyPy < 7.0.0: "py2-style" rlocks, # they are implemented in pure-python py3_style = False pyrlock_type = None # We're monkey-patching so there can't be any greenlets yet, ergo our thread # ID is the only valid owner possible. tid = eventlet.green.thread.get_ident() for obj in gc.get_objects(): if isinstance(obj, rlock_type): if not py3_style and isinstance(obj._RLock__block, lock_type): _fix_py2_rlock(obj, tid) elif py3_style and not isinstance(obj, pyrlock_type): _fix_py3_rlock(obj) def _fix_py2_rlock(rlock, tid): import eventlet.green.threading old = rlock._RLock__block new = eventlet.green.threading.Lock() rlock._RLock__block = new if old.locked(): new.acquire() rlock._RLock__owner = tid def _fix_py3_rlock(old): import gc import threading new = threading._PyRLock() while old._is_owned(): old.release() new.acquire() if old._is_owned(): new.acquire() gc.collect() for ref in gc.get_referrers(old): try: ref_vars = vars(ref) except TypeError: pass else: for k, v in ref_vars.items(): if v == old: setattr(ref, k, new) def _green_os_modules(): from eventlet.green import os return [('os', os)] def _green_select_modules(): from eventlet.green import select modules = [('select', select)] if sys.version_info >= (3, 4): from eventlet.green import selectors modules.append(('selectors', selectors)) return modules def _green_socket_modules(): from eventlet.green import socket try: from eventlet.green import ssl return [('socket', socket), ('ssl', ssl)] except ImportError: return [('socket', socket)] def _green_subprocess_modules(): from eventlet.green import subprocess return [('subprocess', subprocess)] def _green_thread_modules(): from eventlet.green import Queue from eventlet.green import thread from eventlet.green import threading if six.PY2: return [('Queue', Queue), ('thread', thread), ('threading', threading)] if six.PY3: return [('queue', Queue), ('_thread', thread), ('threading', threading)] def _green_time_modules(): from eventlet.green import time return [('time', time)] def _green_MySQLdb(): try: from eventlet.green import MySQLdb return [('MySQLdb', MySQLdb)] except ImportError: return [] def _green_builtins(): try: from eventlet.green import builtin return [('__builtin__' if six.PY2 else 'builtins', builtin)] except ImportError: return [] def slurp_properties(source, destination, ignore=[], 
srckeys=None): """Copy properties from *source* (assumed to be a module) to *destination* (assumed to be a dict). *ignore* lists properties that should not be thusly copied. *srckeys* is a list of keys to copy, if the source's __all__ is untrustworthy. """ if srckeys is None: srckeys = source.__all__ destination.update(dict([ (name, getattr(source, name)) for name in srckeys if not (name.startswith('__') or name in ignore) ])) if __name__ == "__main__": sys.argv.pop(0) monkey_patch() with open(sys.argv[0]) as f: code = compile(f.read(), sys.argv[0], 'exec') exec(code) eventlet-0.30.2/eventlet/pools.py0000644000076500000240000001423314006212666017416 0ustar temotostaff00000000000000from __future__ import print_function import collections from contextlib import contextmanager from eventlet import queue __all__ = ['Pool', 'TokenPool'] class Pool(object): """ Pool class implements resource limitation and construction. There are two ways of using Pool: passing a `create` argument or subclassing. In either case you must provide a way to create the resource. When using `create` argument, pass a function with no arguments:: http_pool = pools.Pool(create=httplib2.Http) If you need to pass arguments, build a nullary function with either `lambda` expression:: http_pool = pools.Pool(create=lambda: httplib2.Http(timeout=90)) or :func:`functools.partial`:: from functools import partial http_pool = pools.Pool(create=partial(httplib2.Http, timeout=90)) When subclassing, define only the :meth:`create` method to implement the desired resource:: class MyPool(pools.Pool): def create(self): return MyObject() If using 2.5 or greater, the :meth:`item` method acts as a context manager; that's the best way to use it:: with mypool.item() as thing: thing.dostuff() The maximum size of the pool can be modified at runtime via the :meth:`resize` method. Specifying a non-zero *min-size* argument pre-populates the pool with *min_size* items. *max-size* sets a hard limit to the size of the pool -- it cannot contain any more items than *max_size*, and if there are already *max_size* items 'checked out' of the pool, the pool will cause any greenthread calling :meth:`get` to cooperatively yield until an item is :meth:`put` in. """ def __init__(self, min_size=0, max_size=4, order_as_stack=False, create=None): """*order_as_stack* governs the ordering of the items in the free pool. If ``False`` (the default), the free items collection (of items that were created and were put back in the pool) acts as a round-robin, giving each item approximately equal utilization. If ``True``, the free pool acts as a FILO stack, which preferentially re-uses items that have most recently been used. """ self.min_size = min_size self.max_size = max_size self.order_as_stack = order_as_stack self.current_size = 0 self.channel = queue.LightQueue(0) self.free_items = collections.deque() if create is not None: self.create = create for x in range(min_size): self.current_size += 1 self.free_items.append(self.create()) def get(self): """Return an item from the pool, when one is available. This may cause the calling greenthread to block. """ if self.free_items: return self.free_items.popleft() self.current_size += 1 if self.current_size <= self.max_size: try: created = self.create() except: self.current_size -= 1 raise return created self.current_size -= 1 # did not create return self.channel.get() @contextmanager def item(self): """ Get an object out of the pool, for use with with statement. 
>>> from eventlet import pools >>> pool = pools.TokenPool(max_size=4) >>> with pool.item() as obj: ... print("got token") ... got token >>> pool.free() 4 """ obj = self.get() try: yield obj finally: self.put(obj) def put(self, item): """Put an item back into the pool, when done. This may cause the putting greenthread to block. """ if self.current_size > self.max_size: self.current_size -= 1 return if self.waiting(): try: self.channel.put(item, block=False) return except queue.Full: pass if self.order_as_stack: self.free_items.appendleft(item) else: self.free_items.append(item) def resize(self, new_size): """Resize the pool to *new_size*. Adjusting this number does not affect existing items checked out of the pool, nor on any greenthreads who are waiting for an item to free up. Some indeterminate number of :meth:`get`/:meth:`put` cycles will be necessary before the new maximum size truly matches the actual operation of the pool. """ self.max_size = new_size def free(self): """Return the number of free items in the pool. This corresponds to the number of :meth:`get` calls needed to empty the pool. """ return len(self.free_items) + self.max_size - self.current_size def waiting(self): """Return the number of routines waiting for a pool item. """ return max(0, self.channel.getting() - self.channel.putting()) def create(self): """Generate a new pool item. In order for the pool to function, either this method must be overriden in a subclass or the pool must be constructed with the `create` argument. It accepts no arguments and returns a single instance of whatever thing the pool is supposed to contain. In general, :meth:`create` is called whenever the pool exceeds its previous high-water mark of concurrently-checked-out-items. In other words, in a new pool with *min_size* of 0, the very first call to :meth:`get` will result in a call to :meth:`create`. If the first caller calls :meth:`put` before some other caller calls :meth:`get`, then the first item will be returned, and :meth:`create` will not be called a second time. """ raise NotImplementedError("Implement in subclass") class Token(object): pass class TokenPool(Pool): """A pool which gives out tokens (opaque unique objects), which indicate that the coroutine which holds the token has a right to consume some limited resource. """ def create(self): return Token() eventlet-0.30.2/eventlet/queue.py0000644000076500000240000004355214006212666017414 0ustar temotostaff00000000000000# Copyright (c) 2009 Denis Bilenko, denis.bilenko at gmail com # Copyright (c) 2010 Eventlet Contributors (see AUTHORS) # and licensed under the MIT license: # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Synchronized queues. The :mod:`eventlet.queue` module implements multi-producer, multi-consumer queues that work across greenlets, with the API similar to the classes found in the standard :mod:`Queue` and :class:`multiprocessing ` modules. A major difference is that queues in this module operate as channels when initialized with *maxsize* of zero. In such case, both :meth:`Queue.empty` and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until a call to :meth:`Queue.get` retrieves the item. An interesting difference, made possible because of greenthreads, is that :meth:`Queue.qsize`, :meth:`Queue.empty`, and :meth:`Queue.full` *can* be used as indicators of whether the subsequent :meth:`Queue.get` or :meth:`Queue.put` will not block. The new methods :meth:`Queue.getting` and :meth:`Queue.putting` report on the number of greenthreads blocking in :meth:`put ` or :meth:`get ` respectively. """ from __future__ import print_function import sys import heapq import collections import traceback from eventlet.event import Event from eventlet.greenthread import getcurrent from eventlet.hubs import get_hub import six from six.moves import queue as Stdlib_Queue from eventlet.timeout import Timeout __all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'LightQueue', 'Full', 'Empty'] _NONE = object() Full = six.moves.queue.Full Empty = six.moves.queue.Empty class Waiter(object): """A low level synchronization class. Wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them safe: * switching will occur only if the waiting greenlet is executing :meth:`wait` method currently. Otherwise, :meth:`switch` and :meth:`throw` are no-ops. * any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw` The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet. The :meth:`wait` method must be called from a greenlet other than :class:`Hub`. """ __slots__ = ['greenlet'] def __init__(self): self.greenlet = None def __repr__(self): if self.waiting: waiting = ' waiting' else: waiting = '' return '<%s at %s%s greenlet=%r>' % ( type(self).__name__, hex(id(self)), waiting, self.greenlet, ) def __str__(self): """ >>> print(Waiter()) """ if self.waiting: waiting = ' waiting' else: waiting = '' return '<%s%s greenlet=%s>' % (type(self).__name__, waiting, self.greenlet) def __nonzero__(self): return self.greenlet is not None __bool__ = __nonzero__ @property def waiting(self): return self.greenlet is not None def switch(self, value=None): """Wake up the greenlet that is calling wait() currently (if there is one). Can only be called from Hub's greenlet. """ assert getcurrent() is get_hub( ).greenlet, "Can only use Waiter.switch method from the mainloop" if self.greenlet is not None: try: self.greenlet.switch(value) except Exception: traceback.print_exc() def throw(self, *throw_args): """Make greenlet calling wait() wake up (if there is a wait()). Can only be called from Hub's greenlet. """ assert getcurrent() is get_hub( ).greenlet, "Can only use Waiter.switch method from the mainloop" if self.greenlet is not None: try: self.greenlet.throw(*throw_args) except Exception: traceback.print_exc() # XXX should be renamed to get() ? and the whole class is called Receiver? 
def wait(self): """Wait until switch() or throw() is called. """ assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, ) self.greenlet = getcurrent() try: return get_hub().switch() finally: self.greenlet = None class LightQueue(object): """ This is a variant of Queue that behaves mostly like the standard :class:`Stdlib_Queue`. It differs by not supporting the :meth:`task_done ` or :meth:`join ` methods, and is a little faster for not having that overhead. """ def __init__(self, maxsize=None): if maxsize is None or maxsize < 0: # None is not comparable in 3.x self.maxsize = None else: self.maxsize = maxsize self.getters = set() self.putters = set() self._event_unlock = None self._init(maxsize) # QQQ make maxsize into a property with setter that schedules unlock if necessary def _init(self, maxsize): self.queue = collections.deque() def _get(self): return self.queue.popleft() def _put(self, item): self.queue.append(item) def __repr__(self): return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format()) def __str__(self): return '<%s %s>' % (type(self).__name__, self._format()) def _format(self): result = 'maxsize=%r' % (self.maxsize, ) if getattr(self, 'queue', None): result += ' queue=%r' % self.queue if self.getters: result += ' getters[%s]' % len(self.getters) if self.putters: result += ' putters[%s]' % len(self.putters) if self._event_unlock is not None: result += ' unlocking' return result def qsize(self): """Return the size of the queue.""" return len(self.queue) def resize(self, size): """Resizes the queue's maximum size. If the size is increased, and there are putters waiting, they may be woken up.""" # None is not comparable in 3.x if self.maxsize is not None and (size is None or size > self.maxsize): # Maybe wake some stuff up self._schedule_unlock() self.maxsize = size def putting(self): """Returns the number of greenthreads that are blocked waiting to put items into the queue.""" return len(self.putters) def getting(self): """Returns the number of greenthreads that are blocked waiting on an empty queue.""" return len(self.getters) def empty(self): """Return ``True`` if the queue is empty, ``False`` otherwise.""" return not self.qsize() def full(self): """Return ``True`` if the queue is full, ``False`` otherwise. ``Queue(None)`` is never full. """ # None is not comparable in 3.x return self.maxsize is not None and self.qsize() >= self.maxsize def put(self, item, block=True, timeout=None): """Put an item into the queue. If optional arg *block* is true and *timeout* is ``None`` (the default), block if necessary until a free slot is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :class:`Full` exception if no free slot was available within that time. Otherwise (*block* is false), put an item on the queue if a free slot is immediately available, else raise the :class:`Full` exception (*timeout* is ignored in that case). 
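        A short sketch of this behaviour (the timeout value is arbitrary)::

            from eventlet.queue import Queue, Full

            q = Queue(maxsize=1)
            q.put('a')                   # a free slot exists, returns at once
            try:
                q.put('b', timeout=0.1)  # queue is full, waits, then raises
            except Full:
                pass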
""" if self.maxsize is None or self.qsize() < self.maxsize: # there's a free slot, put an item right away self._put(item) if self.getters: self._schedule_unlock() elif not block and get_hub().greenlet is getcurrent(): # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though # find a getter and deliver an item to it while self.getters: getter = self.getters.pop() if getter: self._put(item) item = self._get() getter.switch(item) return raise Full elif block: waiter = ItemWaiter(item, block) self.putters.add(waiter) timeout = Timeout(timeout, Full) try: if self.getters: self._schedule_unlock() result = waiter.wait() assert result is waiter, "Invalid switch into Queue.put: %r" % (result, ) if waiter.item is not _NONE: self._put(item) finally: timeout.cancel() self.putters.discard(waiter) elif self.getters: waiter = ItemWaiter(item, block) self.putters.add(waiter) self._schedule_unlock() result = waiter.wait() assert result is waiter, "Invalid switch into Queue.put: %r" % (result, ) if waiter.item is not _NONE: raise Full else: raise Full def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the :class:`Full` exception. """ self.put(item, False) def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args *block* is true and *timeout* is ``None`` (the default), block if necessary until an item is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :class:`Empty` exception if no item was available within that time. Otherwise (*block* is false), return an item if one is immediately available, else raise the :class:`Empty` exception (*timeout* is ignored in that case). """ if self.qsize(): if self.putters: self._schedule_unlock() return self._get() elif not block and get_hub().greenlet is getcurrent(): # special case to make get_nowait() runnable in the mainloop greenlet # there are no items in the queue; try to fix the situation by unlocking putters while self.putters: putter = self.putters.pop() if putter: putter.switch(putter) if self.qsize(): return self._get() raise Empty elif block: waiter = Waiter() timeout = Timeout(timeout, Empty) try: self.getters.add(waiter) if self.putters: self._schedule_unlock() try: return waiter.wait() except: self._schedule_unlock() raise finally: self.getters.discard(waiter) timeout.cancel() else: raise Empty def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the :class:`Empty` exception. 
""" return self.get(False) def _unlock(self): try: while True: if self.qsize() and self.getters: getter = self.getters.pop() if getter: try: item = self._get() except: getter.throw(*sys.exc_info()) else: getter.switch(item) elif self.putters and self.getters: putter = self.putters.pop() if putter: getter = self.getters.pop() if getter: item = putter.item # this makes greenlet calling put() not to call _put() again putter.item = _NONE self._put(item) item = self._get() getter.switch(item) putter.switch(putter) else: self.putters.add(putter) elif self.putters and (self.getters or self.maxsize is None or self.qsize() < self.maxsize): putter = self.putters.pop() putter.switch(putter) elif self.putters and not self.getters: full = [p for p in self.putters if not p.block] if not full: break for putter in full: self.putters.discard(putter) get_hub().schedule_call_global( 0, putter.greenlet.throw, Full) else: break finally: self._event_unlock = None # QQQ maybe it's possible to obtain this info from libevent? # i.e. whether this event is pending _OR_ currently executing # testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a change to execute # to avoid this, schedule unlock with timer(0, ...) once in a while def _schedule_unlock(self): if self._event_unlock is None: self._event_unlock = get_hub().schedule_call_global(0, self._unlock) class ItemWaiter(Waiter): __slots__ = ['item', 'block'] def __init__(self, item, block): Waiter.__init__(self) self.item = item self.block = block class Queue(LightQueue): '''Create a queue object with a given maximum size. If *maxsize* is less than zero or ``None``, the queue size is infinite. ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks until the item is delivered. (This is unlike the standard :class:`Stdlib_Queue`, where 0 means infinite size). In all other respects, this Queue class resembles the standard library, :class:`Stdlib_Queue`. ''' def __init__(self, maxsize=None): LightQueue.__init__(self, maxsize) self.unfinished_tasks = 0 self._cond = Event() def _format(self): result = LightQueue._format(self) if self.unfinished_tasks: result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond) return result def _put(self, item): LightQueue._put(self, item) self._put_bookkeeping() def _put_bookkeeping(self): self.unfinished_tasks += 1 if self._cond.ready(): self._cond.reset() def task_done(self): '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads. For each :meth:`get ` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue that the processing on the task is complete. If a :meth:`join` is currently blocking, it will resume when all items have been processed (meaning that a :meth:`task_done` call was received for every item that had been :meth:`put ` into the queue). Raises a :exc:`ValueError` if called more times than there were items placed in the queue. ''' if self.unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self.unfinished_tasks -= 1 if self.unfinished_tasks == 0: self._cond.send(None) def join(self): '''Block until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls :meth:`task_done` to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, :meth:`join` unblocks. 
''' if self.unfinished_tasks > 0: self._cond.wait() class PriorityQueue(Queue): '''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first). Entries are typically tuples of the form: ``(priority number, data)``. ''' def _init(self, maxsize): self.queue = [] def _put(self, item, heappush=heapq.heappush): heappush(self.queue, item) self._put_bookkeeping() def _get(self, heappop=heapq.heappop): return heappop(self.queue) class LifoQueue(Queue): '''A subclass of :class:`Queue` that retrieves most recently added entries first.''' def _init(self, maxsize): self.queue = [] def _put(self, item): self.queue.append(item) self._put_bookkeeping() def _get(self): return self.queue.pop() eventlet-0.30.2/eventlet/semaphore.py0000644000076500000240000003012114006212666020237 0ustar temotostaff00000000000000import collections import eventlet from eventlet import hubs class Semaphore(object): """An unbounded semaphore. Optionally initialize with a resource *count*, then :meth:`acquire` and :meth:`release` resources as needed. Attempting to :meth:`acquire` when *count* is zero suspends the calling greenthread until *count* becomes nonzero again. This is API-compatible with :class:`threading.Semaphore`. It is a context manager, and thus can be used in a with block:: sem = Semaphore(2) with sem: do_some_stuff() If not specified, *value* defaults to 1. It is possible to limit acquire time:: sem = Semaphore() ok = sem.acquire(timeout=0.1) # True if acquired, False if timed out. """ def __init__(self, value=1): try: value = int(value) except ValueError as e: msg = 'Semaphore() expect value :: int, actual: {0} {1}'.format(type(value), str(e)) raise TypeError(msg) if value < 0: msg = 'Semaphore() expect value >= 0, actual: {0}'.format(repr(value)) raise ValueError(msg) self._original_value = value self.counter = value self._waiters = collections.deque() def __repr__(self): params = (self.__class__.__name__, hex(id(self)), self.counter, len(self._waiters)) return '<%s at %s c=%s _w[%s]>' % params def __str__(self): params = (self.__class__.__name__, self.counter, len(self._waiters)) return '<%s c=%s _w[%s]>' % params def _at_fork_reinit(self): self.counter = self._original_value self._waiters.clear() def locked(self): """Returns true if a call to acquire would block. """ return self.counter <= 0 def bounded(self): """Returns False; for consistency with :class:`~eventlet.semaphore.CappedSemaphore`. """ return False def acquire(self, blocking=True, timeout=None): """Acquire a semaphore. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. Timeout value must be strictly positive. 
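        An illustrative sketch of the timeout behaviour (the timeout value is
        arbitrary)::

            from eventlet.semaphore import Semaphore

            sem = Semaphore(1)
            assert sem.acquire() is True                # counter is now 0
            assert sem.acquire(timeout=0.05) is False   # nothing releases it
            sem.release()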
""" if timeout == -1: timeout = None if timeout is not None and timeout < 0: raise ValueError("timeout value must be strictly positive") if not blocking: if timeout is not None: raise ValueError("can't specify timeout for non-blocking acquire") timeout = 0 if not blocking and self.locked(): return False current_thread = eventlet.getcurrent() if self.counter <= 0 or self._waiters: if current_thread not in self._waiters: self._waiters.append(current_thread) try: if timeout is not None: ok = False with eventlet.Timeout(timeout, False): while self.counter <= 0: hubs.get_hub().switch() ok = True if not ok: return False else: # If someone else is already in this wait loop, give them # a chance to get out. while True: hubs.get_hub().switch() if self.counter > 0: break finally: try: self._waiters.remove(current_thread) except ValueError: # Fine if its already been dropped. pass self.counter -= 1 return True def __enter__(self): self.acquire() def release(self, blocking=True): """Release a semaphore, incrementing the internal counter by one. When it was zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. The *blocking* argument is for consistency with CappedSemaphore and is ignored """ self.counter += 1 if self._waiters: hubs.get_hub().schedule_call_global(0, self._do_acquire) return True def _do_acquire(self): if self._waiters and self.counter > 0: waiter = self._waiters.popleft() waiter.switch() def __exit__(self, typ, val, tb): self.release() @property def balance(self): """An integer value that represents how many new calls to :meth:`acquire` or :meth:`release` would be needed to get the counter to 0. If it is positive, then its value is the number of acquires that can happen before the next acquire would block. If it is negative, it is the negative of the number of releases that would be required in order to make the counter 0 again (one more release would push the counter to 1 and unblock acquirers). It takes into account how many greenthreads are currently blocking in :meth:`acquire`. """ # positive means there are free items # zero means there are no free items but nobody has requested one # negative means there are requests for items, but no items return self.counter - len(self._waiters) class BoundedSemaphore(Semaphore): """A bounded semaphore checks to make sure its current value doesn't exceed its initial value. If it does, ValueError is raised. In most situations semaphores are used to guard resources with limited capacity. If the semaphore is released too many times it's a sign of a bug. If not given, *value* defaults to 1. """ def __init__(self, value=1): super(BoundedSemaphore, self).__init__(value) self.original_counter = value def release(self, blocking=True): """Release a semaphore, incrementing the internal counter by one. If the counter would exceed the initial value, raises ValueError. When it was zero on entry and another thread is waiting for it to become larger than zero again, wake up that thread. The *blocking* argument is for consistency with :class:`CappedSemaphore` and is ignored """ if self.counter >= self.original_counter: raise ValueError("Semaphore released too many times") return super(BoundedSemaphore, self).release(blocking) class CappedSemaphore(object): """A blockingly bounded semaphore. Optionally initialize with a resource *count*, then :meth:`acquire` and :meth:`release` resources as needed. 
Attempting to :meth:`acquire` when *count* is zero suspends the calling greenthread until count becomes nonzero again. Attempting to :meth:`release` after *count* has reached *limit* suspends the calling greenthread until *count* becomes less than *limit* again. This has the same API as :class:`threading.Semaphore`, though its semantics and behavior differ subtly due to the upper limit on calls to :meth:`release`. It is **not** compatible with :class:`threading.BoundedSemaphore` because it blocks when reaching *limit* instead of raising a ValueError. It is a context manager, and thus can be used in a with block:: sem = CappedSemaphore(2) with sem: do_some_stuff() """ def __init__(self, count, limit): if count < 0: raise ValueError("CappedSemaphore must be initialized with a " "positive number, got %s" % count) if count > limit: # accidentally, this also catches the case when limit is None raise ValueError("'count' cannot be more than 'limit'") self.lower_bound = Semaphore(count) self.upper_bound = Semaphore(limit - count) def __repr__(self): params = (self.__class__.__name__, hex(id(self)), self.balance, self.lower_bound, self.upper_bound) return '<%s at %s b=%s l=%s u=%s>' % params def __str__(self): params = (self.__class__.__name__, self.balance, self.lower_bound, self.upper_bound) return '<%s b=%s l=%s u=%s>' % params def locked(self): """Returns true if a call to acquire would block. """ return self.lower_bound.locked() def bounded(self): """Returns true if a call to release would block. """ return self.upper_bound.locked() def acquire(self, blocking=True): """Acquire a semaphore. When invoked without arguments: if the internal counter is larger than zero on entry, decrement it by one and return immediately. If it is zero on entry, block, waiting until some other thread has called release() to make it larger than zero. This is done with proper interlocking so that if multiple acquire() calls are blocked, release() will wake exactly one of them up. The implementation may pick one at random, so the order in which blocked threads are awakened should not be relied on. There is no return value in this case. When invoked with blocking set to true, do the same thing as when called without arguments, and return true. When invoked with blocking set to false, do not block. If a call without an argument would block, return false immediately; otherwise, do the same thing as when called without arguments, and return true. """ if not blocking and self.locked(): return False self.upper_bound.release() try: return self.lower_bound.acquire() except: self.upper_bound.counter -= 1 # using counter directly means that it can be less than zero. # however I certainly don't need to wait here and I don't seem to have # a need to care about such inconsistency raise def __enter__(self): self.acquire() def release(self, blocking=True): """Release a semaphore. In this class, this behaves very much like an :meth:`acquire` but in the opposite direction. Imagine the docs of :meth:`acquire` here, but with every direction reversed. When calling this method, it will block if the internal counter is greater than or equal to *limit*. """ if not blocking and self.bounded(): return False self.lower_bound.release() try: return self.upper_bound.acquire() except: self.lower_bound.counter -= 1 raise def __exit__(self, typ, val, tb): self.release() @property def balance(self): """An integer value that represents how many new calls to :meth:`acquire` or :meth:`release` would be needed to get the counter to 0. 
If it is positive, then its value is the number of acquires that can happen before the next acquire would block. If it is negative, it is the negative of the number of releases that would be required in order to make the counter 0 again (one more release would push the counter to 1 and unblock acquirers). It takes into account how many greenthreads are currently blocking in :meth:`acquire` and :meth:`release`. """ return self.lower_bound.balance - self.upper_bound.balance eventlet-0.30.2/eventlet/support/0000755000076500000240000000000014017673044017424 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/support/__init__.py0000644000076500000240000000404514006212666021535 0ustar temotostaff00000000000000import inspect import functools import sys import warnings from eventlet.support import greenlets _MISSING = object() def get_errno(exc): """ Get the error code out of socket.error objects. socket.error in <2.5 does not have errno attribute socket.error in 3.x does not allow indexing access e.args[0] works for all. There are cases when args[0] is not errno. i.e. http://bugs.python.org/issue6471 Maybe there are cases when errno is set, but it is not the first argument? """ try: if exc.errno is not None: return exc.errno except AttributeError: pass try: return exc.args[0] except IndexError: return None if sys.version_info[0] < 3 and not greenlets.preserves_excinfo: from sys import exc_clear as clear_sys_exc_info else: def clear_sys_exc_info(): """No-op In py3k. Exception information is not visible outside of except statements. sys.exc_clear became obsolete and removed.""" pass if sys.version_info[0] < 3: def bytes_to_str(b, encoding='ascii'): return b else: def bytes_to_str(b, encoding='ascii'): return b.decode(encoding) PY33 = sys.version_info[:2] == (3, 3) def wrap_deprecated(old, new): def _resolve(s): return 'eventlet.'+s if '.' not in s else s msg = '''\ {old} is deprecated and will be removed in next version. Use {new} instead. Autoupgrade: fgrep -rl '{old}' . |xargs -t sed --in-place='' -e 's/{old}/{new}/' '''.format(old=_resolve(old), new=_resolve(new)) def wrapper(base): klass = None if inspect.isclass(base): class klass(base): pass klass.__name__ = base.__name__ klass.__module__ = base.__module__ @functools.wraps(base) def wrapped(*a, **kw): warnings.warn(msg, DeprecationWarning, stacklevel=5) return base(*a, **kw) if klass is not None: klass.__init__ = wrapped return klass return wrapped return wrapper eventlet-0.30.2/eventlet/support/greendns.py0000644000076500000240000007403514017672570021617 0ustar temotostaff00000000000000'''greendns - non-blocking DNS support for Eventlet ''' # Portions of this code taken from the gogreen project: # http://github.com/slideinc/gogreen # # Copyright (c) 2005-2010 Slide, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the author nor the names of other # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import re import struct import sys import eventlet from eventlet import patcher from eventlet.green import _socket_nodns from eventlet.green import os from eventlet.green import time from eventlet.green import select from eventlet.green import ssl import six def import_patched(module_name): # Import cycle note: it's crucial to use _socket_nodns here because # regular evenlet.green.socket imports *this* module and if we imported # it back we'd end with an import cycle (socket -> greendns -> socket). # We break this import cycle by providing a restricted socket module. modules = { 'select': select, 'time': time, 'os': os, 'socket': _socket_nodns, 'ssl': ssl, } return patcher.import_patched(module_name, **modules) dns = import_patched('dns') for pkg in dns.__all__: setattr(dns, pkg, import_patched('dns.' + pkg)) dns.rdtypes.__all__.extend(['dnskeybase', 'dsbase', 'txtbase']) for pkg in dns.rdtypes.__all__: setattr(dns.rdtypes, pkg, import_patched('dns.rdtypes.' + pkg)) for pkg in dns.rdtypes.IN.__all__: setattr(dns.rdtypes.IN, pkg, import_patched('dns.rdtypes.IN.' + pkg)) for pkg in dns.rdtypes.ANY.__all__: setattr(dns.rdtypes.ANY, pkg, import_patched('dns.rdtypes.ANY.' + pkg)) del import_patched socket = _socket_nodns DNS_QUERY_TIMEOUT = 10.0 HOSTS_TTL = 10.0 EAI_EAGAIN_ERROR = socket.gaierror(socket.EAI_AGAIN, 'Lookup timed out') EAI_NONAME_ERROR = socket.gaierror(socket.EAI_NONAME, 'Name or service not known') # EAI_NODATA was removed from RFC3493, it's now replaced with EAI_NONAME # socket.EAI_NODATA is not defined on FreeBSD, probably on some other platforms too. 
# https://lists.freebsd.org/pipermail/freebsd-ports/2003-October/005757.html EAI_NODATA_ERROR = EAI_NONAME_ERROR if (os.environ.get('EVENTLET_DEPRECATED_EAI_NODATA', '').lower() in ('1', 'y', 'yes') and hasattr(socket, 'EAI_NODATA')): EAI_NODATA_ERROR = socket.gaierror(socket.EAI_NODATA, 'No address associated with hostname') def is_ipv4_addr(host): """Return True if host is a valid IPv4 address""" if not isinstance(host, six.string_types): return False try: dns.ipv4.inet_aton(host) except dns.exception.SyntaxError: return False else: return True def is_ipv6_addr(host): """Return True if host is a valid IPv6 address""" if not isinstance(host, six.string_types): return False host = host.split('%', 1)[0] try: dns.ipv6.inet_aton(host) except dns.exception.SyntaxError: return False else: return True def is_ip_addr(host): """Return True if host is a valid IPv4 or IPv6 address""" return is_ipv4_addr(host) or is_ipv6_addr(host) class HostsAnswer(dns.resolver.Answer): """Answer class for HostsResolver object""" def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True): """Create a new answer :qname: A dns.name.Name instance of the query name :rdtype: The rdatatype of the query :rdclass: The rdataclass of the query :rrset: The dns.rrset.RRset with the response, must have ttl attribute :raise_on_no_answer: Whether to raise dns.resolver.NoAnswer if no answer. """ self.response = None self.qname = qname self.rdtype = rdtype self.rdclass = rdclass self.canonical_name = qname if not rrset and raise_on_no_answer: raise dns.resolver.NoAnswer() self.rrset = rrset self.expiration = (time.time() + rrset.ttl if hasattr(rrset, 'ttl') else 0) class HostsResolver(object): """Class to parse the hosts file Attributes ---------- :fname: The filename of the hosts file in use. :interval: The time between checking for hosts file modification """ LINES_RE = re.compile(r""" \s* # Leading space ([^\r\n#]*?) # The actual match, non-greedy so as not to include trailing space \s* # Trailing space (?:[#][^\r\n]+)? # Comments (?:$|[\r\n]+) # EOF or newline """, re.VERBOSE) def __init__(self, fname=None, interval=HOSTS_TTL): self._v4 = {} # name -> ipv4 self._v6 = {} # name -> ipv6 self._aliases = {} # name -> canonical_name self.interval = interval self.fname = fname if fname is None: if os.name == 'posix': self.fname = '/etc/hosts' elif os.name == 'nt': self.fname = os.path.expandvars( r'%SystemRoot%\system32\drivers\etc\hosts') self._last_load = 0 if self.fname: self._load() def _readlines(self): """Read the contents of the hosts file Return list of lines, comment lines and empty lines are excluded. Note that this performs disk I/O so can be blocking. """ try: with open(self.fname, 'rb') as fp: fdata = fp.read() except (IOError, OSError): return [] udata = fdata.decode(errors='ignore') return six.moves.filter(None, self.LINES_RE.findall(udata)) def _load(self): """Load hosts file This will unconditionally (re)load the data from the hosts file. 
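        For reference, entries are parsed in the usual hosts-file layout
        (address first, canonical name next, optional aliases after); the
        names and addresses below are examples only::

            127.0.0.1     localhost
            192.0.2.10    db.example.com    db

        IPv4 addresses populate ``_v4`` and IPv6 addresses populate ``_v6``
        (link-local ``fe80`` addresses are skipped); every additional name
        is recorded in ``_aliases`` as pointing at the canonical name.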
""" lines = self._readlines() self._v4.clear() self._v6.clear() self._aliases.clear() for line in lines: parts = line.split() if len(parts) < 2: continue ip = parts.pop(0) if is_ipv4_addr(ip): ipmap = self._v4 elif is_ipv6_addr(ip): if ip.startswith('fe80'): # Do not use link-local addresses, OSX stores these here continue ipmap = self._v6 else: continue cname = parts.pop(0).lower() ipmap[cname] = ip for alias in parts: alias = alias.lower() ipmap[alias] = ip self._aliases[alias] = cname self._last_load = time.time() def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, tcp=False, source=None, raise_on_no_answer=True): """Query the hosts file The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and dns.rdatatype.CNAME. The ``rdclass`` parameter must be dns.rdataclass.IN while the ``tcp`` and ``source`` parameters are ignored. Return a HostAnswer instance or raise a dns.resolver.NoAnswer exception. """ now = time.time() if self._last_load + self.interval < now: self._load() rdclass = dns.rdataclass.IN if isinstance(qname, six.string_types): name = qname qname = dns.name.from_text(qname) else: name = str(qname) name = name.lower() rrset = dns.rrset.RRset(qname, rdclass, rdtype) rrset.ttl = self._last_load + self.interval - now if rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.A: addr = self._v4.get(name) if not addr and qname.is_absolute(): addr = self._v4.get(name[:-1]) if addr: rrset.add(dns.rdtypes.IN.A.A(rdclass, rdtype, addr)) elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.AAAA: addr = self._v6.get(name) if not addr and qname.is_absolute(): addr = self._v6.get(name[:-1]) if addr: rrset.add(dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype, addr)) elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.CNAME: cname = self._aliases.get(name) if not cname and qname.is_absolute(): cname = self._aliases.get(name[:-1]) if cname: rrset.add(dns.rdtypes.ANY.CNAME.CNAME( rdclass, rdtype, dns.name.from_text(cname))) return HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer) def getaliases(self, hostname): """Return a list of all the aliases of a given cname""" # Due to the way store aliases this is a bit inefficient, this # clearly was an afterthought. But this is only used by # gethostbyname_ex so it's probably fine. aliases = [] if hostname in self._aliases: cannon = self._aliases[hostname] else: cannon = hostname aliases.append(cannon) for alias, cname in six.iteritems(self._aliases): if cannon == cname: aliases.append(alias) aliases.remove(hostname) return aliases class ResolverProxy(object): """Resolver class which can also use /etc/hosts Initialise with a HostsResolver instance in order for it to also use the hosts file. """ def __init__(self, hosts_resolver=None, filename='/etc/resolv.conf'): """Initialise the resolver proxy :param hosts_resolver: An instance of HostsResolver to use. :param filename: The filename containing the resolver configuration. The default value is correct for both UNIX and Windows, on Windows it will result in the configuration being read from the Windows registry. """ self._hosts = hosts_resolver self._filename = filename self.clear() def clear(self): self._resolver = dns.resolver.Resolver(filename=self._filename) self._resolver.cache = dns.resolver.LRUCache() def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, tcp=False, source=None, raise_on_no_answer=True, _hosts_rdtypes=(dns.rdatatype.A, dns.rdatatype.AAAA), use_network=True): """Query the resolver, using /etc/hosts if enabled. 
Behavior: 1. if hosts is enabled and contains answer, return it now 2. query nameservers for qname if use_network is True 3. if qname did not contain dots, pretend it was top-level domain, query "foobar." and append to previous result """ result = [None, None, 0] if qname is None: qname = '0.0.0.0' if isinstance(qname, six.string_types): qname = dns.name.from_text(qname, None) def step(fun, *args, **kwargs): try: a = fun(*args, **kwargs) except Exception as e: result[1] = e return False if a.rrset is not None and len(a.rrset): if result[0] is None: result[0] = a else: result[0].rrset.union_update(a.rrset) result[2] += len(a.rrset) return True def end(): if result[0] is not None: if raise_on_no_answer and result[2] == 0: raise dns.resolver.NoAnswer return result[0] if result[1] is not None: if raise_on_no_answer or not isinstance(result[1], dns.resolver.NoAnswer): raise result[1] raise dns.resolver.NXDOMAIN(qnames=(qname,)) if (self._hosts and (rdclass == dns.rdataclass.IN) and (rdtype in _hosts_rdtypes)): if step(self._hosts.query, qname, rdtype, raise_on_no_answer=False): if (result[0] is not None) or (result[1] is not None) or (not use_network): return end() # Main query step(self._resolver.query, qname, rdtype, rdclass, tcp, source, raise_on_no_answer=False) # `resolv.conf` docs say unqualified names must resolve from search (or local) domain. # However, common OS `getaddrinfo()` implementations append trailing dot (e.g. `db -> db.`) # and ask nameservers, as if top-level domain was queried. # This step follows established practice. # https://github.com/nameko/nameko/issues/392 # https://github.com/eventlet/eventlet/issues/363 if len(qname) == 1: step(self._resolver.query, qname.concatenate(dns.name.root), rdtype, rdclass, tcp, source, raise_on_no_answer=False) return end() def getaliases(self, hostname): """Return a list of all the aliases of a given hostname""" if self._hosts: aliases = self._hosts.getaliases(hostname) else: aliases = [] while True: try: ans = self._resolver.query(hostname, dns.rdatatype.CNAME) except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN): break else: aliases.extend(str(rr.target) for rr in ans.rrset) hostname = ans[0].target return aliases resolver = ResolverProxy(hosts_resolver=HostsResolver()) def resolve(name, family=socket.AF_INET, raises=True, _proxy=None, use_network=True): """Resolve a name for a given family using the global resolver proxy. This method is called by the global getaddrinfo() function. If use_network is False, only resolution via hosts file will be performed. Return a dns.resolver.Answer instance. If there is no answer it's rrset will be emtpy. 
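    A minimal sketch of typical use (the hostname is a placeholder)::

        import socket
        from eventlet.support import greendns

        answer = greendns.resolve('example.com', socket.AF_INET, raises=False)
        # rrset may be None or empty when nothing was found and raises=False
        addresses = [rr.address for rr in answer.rrset] if answer.rrset else []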
""" if family == socket.AF_INET: rdtype = dns.rdatatype.A elif family == socket.AF_INET6: rdtype = dns.rdatatype.AAAA else: raise socket.gaierror(socket.EAI_FAMILY, 'Address family not supported') if _proxy is None: _proxy = resolver try: try: return _proxy.query(name, rdtype, raise_on_no_answer=raises, use_network=use_network) except dns.resolver.NXDOMAIN: if not raises: return HostsAnswer(dns.name.Name(name), rdtype, dns.rdataclass.IN, None, False) raise except dns.exception.Timeout: raise EAI_EAGAIN_ERROR except dns.exception.DNSException: raise EAI_NODATA_ERROR def resolve_cname(host): """Return the canonical name of a hostname""" try: ans = resolver.query(host, dns.rdatatype.CNAME) except dns.resolver.NoAnswer: return host except dns.exception.Timeout: raise EAI_EAGAIN_ERROR except dns.exception.DNSException: raise EAI_NODATA_ERROR else: return str(ans[0].target) def getaliases(host): """Return a list of for aliases for the given hostname This method does translate the dnspython exceptions into socket.gaierror exceptions. If no aliases are available an empty list will be returned. """ try: return resolver.getaliases(host) except dns.exception.Timeout: raise EAI_EAGAIN_ERROR except dns.exception.DNSException: raise EAI_NODATA_ERROR def _getaddrinfo_lookup(host, family, flags): """Resolve a hostname to a list of addresses Helper function for getaddrinfo. """ if flags & socket.AI_NUMERICHOST: raise EAI_NONAME_ERROR addrs = [] if family == socket.AF_UNSPEC: err = None for use_network in [False, True]: for qfamily in [socket.AF_INET6, socket.AF_INET]: try: answer = resolve(host, qfamily, False, use_network=use_network) except socket.gaierror as e: if e.errno not in (socket.EAI_AGAIN, EAI_NONAME_ERROR.errno, EAI_NODATA_ERROR.errno): raise err = e else: if answer.rrset: addrs.extend(rr.address for rr in answer.rrset) if addrs: break if err is not None and not addrs: raise err elif family == socket.AF_INET6 and flags & socket.AI_V4MAPPED: answer = resolve(host, socket.AF_INET6, False) if answer.rrset: addrs = [rr.address for rr in answer.rrset] if not addrs or flags & socket.AI_ALL: answer = resolve(host, socket.AF_INET, False) if answer.rrset: addrs = ['::ffff:' + rr.address for rr in answer.rrset] else: answer = resolve(host, family, False) if answer.rrset: addrs = [rr.address for rr in answer.rrset] return str(answer.qname), addrs def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0): """Replacement for Python's socket.getaddrinfo This does the A and AAAA lookups asynchronously after which it calls the OS' getaddrinfo(3) using the AI_NUMERICHOST flag. This flag ensures getaddrinfo(3) does not use the network itself and allows us to respect all the other arguments like the native OS. 
""" if isinstance(host, six.string_types): host = host.encode('idna').decode('ascii') if host is not None and not is_ip_addr(host): qname, addrs = _getaddrinfo_lookup(host, family, flags) else: qname = host addrs = [host] aiflags = (flags | socket.AI_NUMERICHOST) & (0xffff ^ socket.AI_CANONNAME) res = [] err = None for addr in addrs: try: ai = socket.getaddrinfo(addr, port, family, socktype, proto, aiflags) except socket.error as e: if flags & socket.AI_ADDRCONFIG: err = e continue raise res.extend(ai) if not res: if err: raise err raise socket.gaierror(socket.EAI_NONAME, 'No address found') if flags & socket.AI_CANONNAME: if not is_ip_addr(qname): qname = resolve_cname(qname).encode('ascii').decode('idna') ai = res[0] res[0] = (ai[0], ai[1], ai[2], qname, ai[4]) return res def gethostbyname(hostname): """Replacement for Python's socket.gethostbyname""" if is_ipv4_addr(hostname): return hostname rrset = resolve(hostname) return rrset[0].address def gethostbyname_ex(hostname): """Replacement for Python's socket.gethostbyname_ex""" if is_ipv4_addr(hostname): return (hostname, [], [hostname]) ans = resolve(hostname) aliases = getaliases(hostname) addrs = [rr.address for rr in ans.rrset] qname = str(ans.qname) if qname[-1] == '.': qname = qname[:-1] return (qname, aliases, addrs) def getnameinfo(sockaddr, flags): """Replacement for Python's socket.getnameinfo. Currently only supports IPv4. """ try: host, port = sockaddr except (ValueError, TypeError): if not isinstance(sockaddr, tuple): del sockaddr # to pass a stdlib test that is # hyper-careful about reference counts raise TypeError('getnameinfo() argument 1 must be a tuple') else: # must be ipv6 sockaddr, pretending we don't know how to resolve it raise EAI_NONAME_ERROR if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST): # Conflicting flags. Punt. raise EAI_NONAME_ERROR if is_ipv4_addr(host): try: rrset = resolver.query( dns.reversename.from_address(host), dns.rdatatype.PTR) if len(rrset) > 1: raise socket.error('sockaddr resolved to multiple addresses') host = rrset[0].target.to_text(omit_final_dot=True) except dns.exception.Timeout: if flags & socket.NI_NAMEREQD: raise EAI_EAGAIN_ERROR except dns.exception.DNSException: if flags & socket.NI_NAMEREQD: raise EAI_NONAME_ERROR else: try: rrset = resolver.query(host) if len(rrset) > 1: raise socket.error('sockaddr resolved to multiple addresses') if flags & socket.NI_NUMERICHOST: host = rrset[0].address except dns.exception.Timeout: raise EAI_EAGAIN_ERROR except dns.exception.DNSException: raise socket.gaierror( (socket.EAI_NODATA, 'No address associated with hostname')) if not (flags & socket.NI_NUMERICSERV): proto = (flags & socket.NI_DGRAM) and 'udp' or 'tcp' port = socket.getservbyport(port, proto) return (host, port) def _net_read(sock, count, expiration): """coro friendly replacement for dns.query._net_read Read the specified number of bytes from sock. Keep trying until we either get the desired amount, or we hit EOF. A Timeout exception will be raised if the operation is not completed by the expiration time. """ s = bytearray() while count > 0: try: n = sock.recv(count) except socket.timeout: # Q: Do we also need to catch coro.CoroutineSocketWake and pass? if expiration - time.time() <= 0.0: raise dns.exception.Timeout eventlet.sleep(0.01) continue if n == b'': raise EOFError count = count - len(n) s += n return s def _net_write(sock, data, expiration): """coro friendly replacement for dns.query._net_write Write the specified data to the socket. 
A Timeout exception will be raised if the operation is not completed by the expiration time. """ current = 0 l = len(data) while current < l: try: current += sock.send(data[current:]) except socket.timeout: # Q: Do we also need to catch coro.CoroutineSocketWake and pass? if expiration - time.time() <= 0.0: raise dns.exception.Timeout def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53, af=None, source=None, source_port=0, ignore_unexpected=False): """coro friendly replacement for dns.query.udp Return the response obtained after sending a query via UDP. @param q: the query @type q: dns.message.Message @param where: where to send the message @type where: string containing an IPv4 or IPv6 address @param timeout: The number of seconds to wait before the query times out. If None, the default, wait forever. @type timeout: float @param port: The port to which to send the message. The default is 53. @type port: int @param af: the address family to use. The default is None, which causes the address family to use to be inferred from the form of of where. If the inference attempt fails, AF_INET is used. @type af: int @rtype: dns.message.Message object @param source: source address. The default is the IPv4 wildcard address. @type source: string @param source_port: The port from which to send the message. The default is 0. @type source_port: int @param ignore_unexpected: If True, ignore responses from unexpected sources. The default is False. @type ignore_unexpected: bool""" wire = q.to_wire() if af is None: try: af = dns.inet.af_for_address(where) except: af = dns.inet.AF_INET if af == dns.inet.AF_INET: destination = (where, port) if source is not None: source = (source, source_port) elif af == dns.inet.AF_INET6: # Purge any stray zeroes in source address. When doing the tuple comparison # below, we need to always ensure both our target and where we receive replies # from are compared with all zeroes removed so that we don't erroneously fail. # e.g. ('00::1', 53, 0, 0) != ('::1', 53, 0, 0) where_trunc = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(where)) destination = (where_trunc, port, 0, 0) if source is not None: source = (source, source_port, 0, 0) s = socket.socket(af, socket.SOCK_DGRAM) s.settimeout(timeout) try: expiration = dns.query._compute_expiration(timeout) if source is not None: s.bind(source) while True: try: s.sendto(wire, destination) break except socket.timeout: # Q: Do we also need to catch coro.CoroutineSocketWake and pass? if expiration - time.time() <= 0.0: raise dns.exception.Timeout eventlet.sleep(0.01) continue tried = False while True: # If we've tried to receive at least once, check to see if our # timer expired if tried and (expiration - time.time() <= 0.0): raise dns.exception.Timeout # Sleep if we are retrying the operation due to a bad source # address or a socket timeout. if tried: eventlet.sleep(0.01) tried = True try: (wire, from_address) = s.recvfrom(65535) except socket.timeout: # Q: Do we also need to catch coro.CoroutineSocketWake and pass? 
continue if dns.inet.af_for_address(from_address[0]) == dns.inet.AF_INET6: # Purge all possible zeroes for ipv6 to match above logic addr = from_address[0] addr = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(addr)) from_address = (addr, from_address[1], from_address[2], from_address[3]) if from_address == destination: break if not ignore_unexpected: raise dns.query.UnexpectedSource( 'got a response from %s instead of %s' % (from_address, destination)) finally: s.close() r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac) if not q.is_response(r): raise dns.query.BadResponse() return r def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53, af=None, source=None, source_port=0): """coro friendly replacement for dns.query.tcp Return the response obtained after sending a query via TCP. @param q: the query @type q: dns.message.Message object @param where: where to send the message @type where: string containing an IPv4 or IPv6 address @param timeout: The number of seconds to wait before the query times out. If None, the default, wait forever. @type timeout: float @param port: The port to which to send the message. The default is 53. @type port: int @param af: the address family to use. The default is None, which causes the address family to use to be inferred from the form of of where. If the inference attempt fails, AF_INET is used. @type af: int @rtype: dns.message.Message object @param source: source address. The default is the IPv4 wildcard address. @type source: string @param source_port: The port from which to send the message. The default is 0. @type source_port: int""" wire = q.to_wire() if af is None: try: af = dns.inet.af_for_address(where) except: af = dns.inet.AF_INET if af == dns.inet.AF_INET: destination = (where, port) if source is not None: source = (source, source_port) elif af == dns.inet.AF_INET6: destination = (where, port, 0, 0) if source is not None: source = (source, source_port, 0, 0) s = socket.socket(af, socket.SOCK_STREAM) s.settimeout(timeout) try: expiration = dns.query._compute_expiration(timeout) if source is not None: s.bind(source) while True: try: s.connect(destination) break except socket.timeout: # Q: Do we also need to catch coro.CoroutineSocketWake and pass? if expiration - time.time() <= 0.0: raise dns.exception.Timeout eventlet.sleep(0.01) continue l = len(wire) # copying the wire into tcpmsg is inefficient, but lets us # avoid writev() or doing a short write that would get pushed # onto the net tcpmsg = struct.pack("!H", l) + wire _net_write(s, tcpmsg, expiration) ldata = _net_read(s, 2, expiration) (l,) = struct.unpack("!H", ldata) wire = bytes(_net_read(s, l, expiration)) finally: s.close() r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac) if not q.is_response(r): raise dns.query.BadResponse() return r def reset(): resolver.clear() # Install our coro-friendly replacements for the tcp and udp query methods. 
dns.query.tcp = tcp dns.query.udp = udp eventlet-0.30.2/eventlet/support/greenlets.py0000644000076500000240000000045214006212666021764 0ustar temotostaff00000000000000import distutils.version import greenlet getcurrent = greenlet.greenlet.getcurrent GreenletExit = greenlet.greenlet.GreenletExit preserves_excinfo = (distutils.version.LooseVersion(greenlet.__version__) >= distutils.version.LooseVersion('0.3.2')) greenlet = greenlet.greenlet eventlet-0.30.2/eventlet/support/psycopg2_patcher.py0000644000076500000240000000434014006212666023250 0ustar temotostaff00000000000000"""A wait callback to allow psycopg2 cooperation with eventlet. Use `make_psycopg_green()` to enable eventlet support in Psycopg. """ # Copyright (C) 2010 Daniele Varrazzo # and licensed under the MIT license: # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
import psycopg2 from psycopg2 import extensions import eventlet.hubs def make_psycopg_green(): """Configure Psycopg to be used with eventlet in non-blocking way.""" if not hasattr(extensions, 'set_wait_callback'): raise ImportError( "support for coroutines not available in this Psycopg version (%s)" % psycopg2.__version__) extensions.set_wait_callback(eventlet_wait_callback) def eventlet_wait_callback(conn, timeout=-1): """A wait callback useful to allow eventlet to work with Psycopg.""" while 1: state = conn.poll() if state == extensions.POLL_OK: break elif state == extensions.POLL_READ: eventlet.hubs.trampoline(conn.fileno(), read=True) elif state == extensions.POLL_WRITE: eventlet.hubs.trampoline(conn.fileno(), write=True) else: raise psycopg2.OperationalError( "Bad result from poll: %r" % state) eventlet-0.30.2/eventlet/support/pylib.py0000644000076500000240000000042214006212666021110 0ustar temotostaff00000000000000from py.magic import greenlet import sys import types def emulate(): module = types.ModuleType('greenlet') sys.modules['greenlet'] = module module.greenlet = greenlet module.getcurrent = greenlet.getcurrent module.GreenletExit = greenlet.GreenletExit eventlet-0.30.2/eventlet/support/stacklesspypys.py0000644000076500000240000000042314006212666023073 0ustar temotostaff00000000000000from stackless import greenlet import sys import types def emulate(): module = types.ModuleType('greenlet') sys.modules['greenlet'] = module module.greenlet = greenlet module.getcurrent = greenlet.getcurrent module.GreenletExit = greenlet.GreenletExit eventlet-0.30.2/eventlet/support/stacklesss.py0000644000076500000240000000351314006212666022154 0ustar temotostaff00000000000000""" Support for using stackless python. Broken and riddled with print statements at the moment. Please fix it! 
""" import sys import types import stackless caller = None coro_args = {} tasklet_to_greenlet = {} def getcurrent(): return tasklet_to_greenlet[stackless.getcurrent()] class FirstSwitch(object): def __init__(self, gr): self.gr = gr def __call__(self, *args, **kw): # print("first call", args, kw) gr = self.gr del gr.switch run, gr.run = gr.run, None t = stackless.tasklet(run) gr.t = t tasklet_to_greenlet[t] = gr t.setup(*args, **kw) t.run() class greenlet(object): def __init__(self, run=None, parent=None): self.dead = False if parent is None: parent = getcurrent() self.parent = parent if run is not None: self.run = run self.switch = FirstSwitch(self) def switch(self, *args): # print("switch", args) global caller caller = stackless.getcurrent() coro_args[self] = args self.t.insert() stackless.schedule() if caller is not self.t: caller.remove() rval = coro_args[self] return rval def run(self): pass def __bool__(self): return self.run is None and not self.dead class GreenletExit(Exception): pass def emulate(): module = types.ModuleType('greenlet') sys.modules['greenlet'] = module module.greenlet = greenlet module.getcurrent = getcurrent module.GreenletExit = GreenletExit caller = stackless.getcurrent() tasklet_to_greenlet[caller] = None main_coro = greenlet() tasklet_to_greenlet[caller] = main_coro main_coro.t = caller del main_coro.switch # It's already running coro_args[main_coro] = None eventlet-0.30.2/eventlet/timeout.py0000644000076500000240000001452114006212666017750 0ustar temotostaff00000000000000# Copyright (c) 2009-2010 Denis Bilenko, denis.bilenko at gmail com # Copyright (c) 2010 Eventlet Contributors (see AUTHORS) # and licensed under the MIT license: # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import functools import inspect import eventlet from eventlet.support import greenlets as greenlet from eventlet.hubs import get_hub __all__ = ['Timeout', 'with_timeout', 'wrap_is_timeout', 'is_timeout'] _MISSING = object() # deriving from BaseException so that "except Exception as e" doesn't catch # Timeout exceptions. class Timeout(BaseException): """Raises *exception* in the current greenthread after *timeout* seconds. When *exception* is omitted or ``None``, the :class:`Timeout` instance itself is raised. If *seconds* is None, the timer is not scheduled, and is only useful if you're planning to raise it directly. Timeout objects are context managers, and so can be used in with statements. 
When used in a with statement, if *exception* is ``False``, the timeout is still raised, but the context manager suppresses it, so the code outside the with-block won't see it. """ def __init__(self, seconds=None, exception=None): self.seconds = seconds self.exception = exception self.timer = None self.start() def start(self): """Schedule the timeout. This is called on construction, so it should not be called explicitly, unless the timer has been canceled.""" assert not self.pending, \ '%r is already started; to restart it, cancel it first' % self if self.seconds is None: # "fake" timeout (never expires) self.timer = None elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self self.timer = get_hub().schedule_call_global( self.seconds, greenlet.getcurrent().throw, self) else: # regular timeout with user-provided exception self.timer = get_hub().schedule_call_global( self.seconds, greenlet.getcurrent().throw, self.exception) return self @property def pending(self): """True if the timeout is scheduled to be raised.""" if self.timer is not None: return self.timer.pending else: return False def cancel(self): """If the timeout is pending, cancel it. If not using Timeouts in ``with`` statements, always call cancel() in a ``finally`` after the block of code that is getting timed out. If not canceled, the timeout will be raised later on, in some unexpected section of the application.""" if self.timer is not None: self.timer.cancel() self.timer = None def __repr__(self): classname = self.__class__.__name__ if self.pending: pending = ' pending' else: pending = '' if self.exception is None: exception = '' else: exception = ' exception=%r' % self.exception return '<%s at %s seconds=%s%s%s>' % ( classname, hex(id(self)), self.seconds, exception, pending) def __str__(self): """ >>> raise Timeout # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Timeout """ if self.seconds is None: return '' if self.seconds == 1: suffix = '' else: suffix = 's' if self.exception is None or self.exception is True: return '%s second%s' % (self.seconds, suffix) elif self.exception is False: return '%s second%s (silent)' % (self.seconds, suffix) else: return '%s second%s (%s)' % (self.seconds, suffix, self.exception) def __enter__(self): if self.timer is None: self.start() return self def __exit__(self, typ, value, tb): self.cancel() if value is self and self.exception is False: return True @property def is_timeout(self): return True def with_timeout(seconds, function, *args, **kwds): """Wrap a call to some (yielding) function with a timeout; if the called function fails to return before the timeout, cancel it and return a flag value. """ timeout_value = kwds.pop("timeout_value", _MISSING) timeout = Timeout(seconds) try: try: return function(*args, **kwds) except Timeout as ex: if ex is timeout and timeout_value is not _MISSING: return timeout_value raise finally: timeout.cancel() def wrap_is_timeout(base): '''Adds `.is_timeout=True` attribute to objects returned by `base()`. When `base` is class, attribute is added as read-only property. Returns `base`. Otherwise, it returns a function that sets attribute on result of `base()` call. Wrappers make best effort to be transparent. 
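    For illustration, decorating a made-up exception class (``MyDeadline`` is
    not part of eventlet) lets :func:`is_timeout` recognise its instances::

        @wrap_is_timeout
        class MyDeadline(Exception):
            pass

        assert is_timeout(MyDeadline())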
''' if inspect.isclass(base): base.is_timeout = property(lambda _: True) return base @functools.wraps(base) def fun(*args, **kwargs): ex = base(*args, **kwargs) ex.is_timeout = True return ex return fun def is_timeout(obj): py3err = getattr(__builtins__, 'TimeoutError', Timeout) return bool(getattr(obj, 'is_timeout', False)) or isinstance(obj, py3err) eventlet-0.30.2/eventlet/tpool.py0000644000076500000240000002473214006212666017424 0ustar temotostaff00000000000000# Copyright (c) 2007-2009, Linden Research, Inc. # Copyright (c) 2007, IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import atexit import imp import os import sys import traceback import eventlet from eventlet import event, greenio, greenthread, patcher, timeout import six __all__ = ['execute', 'Proxy', 'killall', 'set_num_threads'] EXC_CLASSES = (Exception, timeout.Timeout) SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit) QUIET = True socket = patcher.original('socket') threading = patcher.original('threading') if six.PY2: Queue_module = patcher.original('Queue') if six.PY3: Queue_module = patcher.original('queue') Empty = Queue_module.Empty Queue = Queue_module.Queue _bytetosend = b' ' _coro = None _nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20)) _reqq = _rspq = None _rsock = _wsock = None _setup_already = False _threads = [] def tpool_trampoline(): global _rspq while True: try: _c = _rsock.recv(1) assert _c # FIXME: this is probably redundant since using sockets instead of pipe now except ValueError: break # will be raised when pipe is closed while not _rspq.empty(): try: (e, rv) = _rspq.get(block=False) e.send(rv) e = rv = None except Empty: pass def tworker(): global _rspq while True: try: msg = _reqq.get() except AttributeError: return # can't get anything off of a dud queue if msg is None: return (e, meth, args, kwargs) = msg rv = None try: rv = meth(*args, **kwargs) except SYS_EXCS: raise except EXC_CLASSES: rv = sys.exc_info() if sys.version_info >= (3, 4): traceback.clear_frames(rv[1].__traceback__) if six.PY2: sys.exc_clear() # test_leakage_from_tracebacks verifies that the use of # exc_info does not lead to memory leaks _rspq.put((e, rv)) msg = meth = args = kwargs = e = rv = None _wsock.sendall(_bytetosend) def execute(meth, *args, **kwargs): """ Execute *meth* in a Python thread, blocking the current coroutine/ greenthread until the method completes. The primary use case for this is to wrap an object or module that is not amenable to monkeypatching or any of the other tricks that Eventlet uses to achieve cooperative yielding. With tpool, you can force such objects to cooperate with green threads by sticking them in native threads, at the cost of some overhead. 
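    A small sketch (``read_file_slowly`` and the path are placeholders)::

        from eventlet import tpool

        def read_file_slowly(path):
            # ordinary blocking I/O; offloaded to a native thread by tpool
            with open(path) as f:
                return f.read()

        data = tpool.execute(read_file_slowly, '/etc/hostname')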
""" setup() # if already in tpool, don't recurse into the tpool # also, call functions directly if we're inside an import lock, because # if meth does any importing (sadly common), it will hang my_thread = threading.currentThread() if my_thread in _threads or imp.lock_held() or _nthreads == 0: return meth(*args, **kwargs) e = event.Event() _reqq.put((e, meth, args, kwargs)) rv = e.wait() if isinstance(rv, tuple) \ and len(rv) == 3 \ and isinstance(rv[1], EXC_CLASSES): (c, e, tb) = rv if not QUIET: traceback.print_exception(c, e, tb) traceback.print_stack() six.reraise(c, e, tb) return rv def proxy_call(autowrap, f, *args, **kwargs): """ Call a function *f* and returns the value. If the type of the return value is in the *autowrap* collection, then it is wrapped in a :class:`Proxy` object before return. Normally *f* will be called in the threadpool with :func:`execute`; if the keyword argument "nonblocking" is set to ``True``, it will simply be executed directly. This is useful if you have an object which has methods that don't need to be called in a separate thread, but which return objects that should be Proxy wrapped. """ if kwargs.pop('nonblocking', False): rv = f(*args, **kwargs) else: rv = execute(f, *args, **kwargs) if isinstance(rv, autowrap): return Proxy(rv, autowrap) else: return rv class Proxy(object): """ a simple proxy-wrapper of any object that comes with a methods-only interface, in order to forward every method invocation onto a thread in the native-thread pool. A key restriction is that the object's methods should not switch greenlets or use Eventlet primitives, since they are in a different thread from the main hub, and therefore might behave unexpectedly. This is for running native-threaded code only. It's common to want to have some of the attributes or return values also wrapped in Proxy objects (for example, database connection objects produce cursor objects which also should be wrapped in Proxy objects to remain nonblocking). *autowrap*, if supplied, is a collection of types; if an attribute or return value matches one of those types (via isinstance), it will be wrapped in a Proxy. *autowrap_names* is a collection of strings, which represent the names of attributes that should be wrapped in Proxy objects when accessed. 
""" def __init__(self, obj, autowrap=(), autowrap_names=()): self._obj = obj self._autowrap = autowrap self._autowrap_names = autowrap_names def __getattr__(self, attr_name): f = getattr(self._obj, attr_name) if not hasattr(f, '__call__'): if isinstance(f, self._autowrap) or attr_name in self._autowrap_names: return Proxy(f, self._autowrap) return f def doit(*args, **kwargs): result = proxy_call(self._autowrap, f, *args, **kwargs) if attr_name in self._autowrap_names and not isinstance(result, Proxy): return Proxy(result) return result return doit # the following are a buncha methods that the python interpeter # doesn't use getattr to retrieve and therefore have to be defined # explicitly def __getitem__(self, key): return proxy_call(self._autowrap, self._obj.__getitem__, key) def __setitem__(self, key, value): return proxy_call(self._autowrap, self._obj.__setitem__, key, value) def __deepcopy__(self, memo=None): return proxy_call(self._autowrap, self._obj.__deepcopy__, memo) def __copy__(self, memo=None): return proxy_call(self._autowrap, self._obj.__copy__, memo) def __call__(self, *a, **kw): if '__call__' in self._autowrap_names: return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw)) else: return proxy_call(self._autowrap, self._obj, *a, **kw) def __enter__(self): return proxy_call(self._autowrap, self._obj.__enter__) def __exit__(self, *exc): return proxy_call(self._autowrap, self._obj.__exit__, *exc) # these don't go through a proxy call, because they're likely to # be called often, and are unlikely to be implemented on the # wrapped object in such a way that they would block def __eq__(self, rhs): return self._obj == rhs def __hash__(self): return self._obj.__hash__() def __repr__(self): return self._obj.__repr__() def __str__(self): return self._obj.__str__() def __len__(self): return len(self._obj) def __nonzero__(self): return bool(self._obj) # Python3 __bool__ = __nonzero__ def __iter__(self): it = iter(self._obj) if it == self._obj: return self else: return Proxy(it) def next(self): return proxy_call(self._autowrap, next, self._obj) # Python3 __next__ = next def setup(): global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq if _setup_already: return else: _setup_already = True assert _nthreads >= 0, "Can't specify negative number of threads" if _nthreads == 0: import warnings warnings.warn("Zero threads in tpool. All tpool.execute calls will\ execute in main thread. 
Check the value of the environment \ variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning) _reqq = Queue(maxsize=-1) _rspq = Queue(maxsize=-1) # connected socket pair sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(('127.0.0.1', 0)) sock.listen(1) csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) csock.connect(sock.getsockname()) csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True) _wsock, _addr = sock.accept() _wsock.settimeout(None) _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True) sock.close() _rsock = greenio.GreenSocket(csock) _rsock.settimeout(None) for i in six.moves.range(_nthreads): t = threading.Thread(target=tworker, name="tpool_thread_%s" % i) t.setDaemon(True) t.start() _threads.append(t) _coro = greenthread.spawn_n(tpool_trampoline) # This yield fixes subtle error with GreenSocket.__del__ eventlet.sleep(0) # Avoid ResourceWarning unclosed socket on Python3.2+ @atexit.register def killall(): global _setup_already, _rspq, _rsock, _wsock if not _setup_already: return # This yield fixes freeze in some scenarios eventlet.sleep(0) for thr in _threads: _reqq.put(None) for thr in _threads: thr.join() del _threads[:] # return any remaining results while (_rspq is not None) and not _rspq.empty(): try: (e, rv) = _rspq.get(block=False) e.send(rv) e = rv = None except Empty: pass if _coro is not None: greenthread.kill(_coro) if _rsock is not None: _rsock.close() _rsock = None if _wsock is not None: _wsock.close() _wsock = None _rspq = None _setup_already = False def set_num_threads(nthreads): global _nthreads _nthreads = nthreads eventlet-0.30.2/eventlet/websocket.py0000644000076500000240000007731614006212666020263 0ustar temotostaff00000000000000import base64 import codecs import collections import errno from random import Random from socket import error as SocketError import string import struct import sys import time import zlib try: from hashlib import md5, sha1 except ImportError: # pragma NO COVER from md5 import md5 from sha import sha as sha1 from eventlet import semaphore from eventlet import wsgi from eventlet.green import socket from eventlet.support import get_errno import six # Python 2's utf8 decoding is more lenient than we'd like # In order to pass autobahn's testsuite we need stricter validation # if available... for _mod in ('wsaccel.utf8validator', 'autobahn.utf8validator'): # autobahn has it's own python-based validator. in newest versions # this prefers to use wsaccel, a cython based implementation, if available. # wsaccel may also be installed w/out autobahn, or with a earlier version. try: utf8validator = __import__(_mod, {}, {}, ['']) except ImportError: utf8validator = None else: break ACCEPTABLE_CLIENT_ERRORS = set((errno.ECONNRESET, errno.EPIPE)) __all__ = ["WebSocketWSGI", "WebSocket"] PROTOCOL_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11' VALID_CLOSE_STATUS = set( list(range(1000, 1004)) + list(range(1007, 1012)) + # 3000-3999: reserved for use by libraries, frameworks, # and applications list(range(3000, 4000)) + # 4000-4999: reserved for private use and thus can't # be registered list(range(4000, 5000)) ) class BadRequest(Exception): def __init__(self, status='400 Bad Request', body=None, headers=None): super(Exception, self).__init__() self.status = status self.body = body self.headers = headers class WebSocketWSGI(object): """Wraps a websocket handler function in a WSGI application. 
Use it like this:: @websocket.WebSocketWSGI def my_handler(ws): from_browser = ws.wait() ws.send("from server") The single argument to the function will be an instance of :class:`WebSocket`. To close the socket, simply return from the function. Note that the server will log the websocket request at the time of closure. """ def __init__(self, handler): self.handler = handler self.protocol_version = None self.support_legacy_versions = True self.supported_protocols = [] self.origin_checker = None @classmethod def configured(cls, handler=None, supported_protocols=None, origin_checker=None, support_legacy_versions=False): def decorator(handler): inst = cls(handler) inst.support_legacy_versions = support_legacy_versions inst.origin_checker = origin_checker if supported_protocols: inst.supported_protocols = supported_protocols return inst if handler is None: return decorator return decorator(handler) def __call__(self, environ, start_response): http_connection_parts = [ part.strip() for part in environ.get('HTTP_CONNECTION', '').lower().split(',')] if not ('upgrade' in http_connection_parts and environ.get('HTTP_UPGRADE', '').lower() == 'websocket'): # need to check a few more things here for true compliance start_response('400 Bad Request', [('Connection', 'close')]) return [] try: if 'HTTP_SEC_WEBSOCKET_VERSION' in environ: ws = self._handle_hybi_request(environ) elif self.support_legacy_versions: ws = self._handle_legacy_request(environ) else: raise BadRequest() except BadRequest as e: status = e.status body = e.body or b'' headers = e.headers or [] start_response(status, [('Connection', 'close'), ] + headers) return [body] try: self.handler(ws) except socket.error as e: if get_errno(e) not in ACCEPTABLE_CLIENT_ERRORS: raise # Make sure we send the closing frame ws._send_closing_frame(True) # use this undocumented feature of eventlet.wsgi to ensure that it # doesn't barf on the fact that we didn't call start_response return wsgi.ALREADY_HANDLED def _handle_legacy_request(self, environ): if 'eventlet.input' in environ: sock = environ['eventlet.input'].get_socket() elif 'gunicorn.socket' in environ: sock = environ['gunicorn.socket'] else: raise Exception('No eventlet.input or gunicorn.socket present in environ.') if 'HTTP_SEC_WEBSOCKET_KEY1' in environ: self.protocol_version = 76 if 'HTTP_SEC_WEBSOCKET_KEY2' not in environ: raise BadRequest() else: self.protocol_version = 75 if self.protocol_version == 76: key1 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY1']) key2 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY2']) # There's no content-length header in the request, but it has 8 # bytes of data. environ['wsgi.input'].content_length = 8 key3 = environ['wsgi.input'].read(8) key = struct.pack(">II", key1, key2) + key3 response = md5(key).digest() # Start building the response scheme = 'ws' if environ.get('wsgi.url_scheme') == 'https': scheme = 'wss' location = '%s://%s%s%s' % ( scheme, environ.get('HTTP_HOST'), environ.get('SCRIPT_NAME'), environ.get('PATH_INFO') ) qs = environ.get('QUERY_STRING') if qs is not None: location += '?' 
+ qs if self.protocol_version == 75: handshake_reply = ( b"HTTP/1.1 101 Web Socket Protocol Handshake\r\n" b"Upgrade: WebSocket\r\n" b"Connection: Upgrade\r\n" b"WebSocket-Origin: " + six.b(environ.get('HTTP_ORIGIN')) + b"\r\n" b"WebSocket-Location: " + six.b(location) + b"\r\n\r\n" ) elif self.protocol_version == 76: handshake_reply = ( b"HTTP/1.1 101 WebSocket Protocol Handshake\r\n" b"Upgrade: WebSocket\r\n" b"Connection: Upgrade\r\n" b"Sec-WebSocket-Origin: " + six.b(environ.get('HTTP_ORIGIN')) + b"\r\n" b"Sec-WebSocket-Protocol: " + six.b(environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default')) + b"\r\n" b"Sec-WebSocket-Location: " + six.b(location) + b"\r\n" b"\r\n" + response ) else: # pragma NO COVER raise ValueError("Unknown WebSocket protocol version.") sock.sendall(handshake_reply) return WebSocket(sock, environ, self.protocol_version) def _parse_extension_header(self, header): if header is None: return None res = {} for ext in header.split(","): parts = ext.split(";") config = {} for part in parts[1:]: key_val = part.split("=") if len(key_val) == 1: config[key_val[0].strip().lower()] = True else: config[key_val[0].strip().lower()] = key_val[1].strip().strip('"').lower() res.setdefault(parts[0].strip().lower(), []).append(config) return res def _negotiate_permessage_deflate(self, extensions): if not extensions: return None deflate = extensions.get("permessage-deflate") if deflate is None: return None for config in deflate: # We'll evaluate each config in the client's preferred order and pick # the first that we can support. want_config = { # These are bool options, we can support both "server_no_context_takeover": config.get("server_no_context_takeover", False), "client_no_context_takeover": config.get("client_no_context_takeover", False) } # These are either bool OR int options. True means the client can accept a value # for the option, a number means the client wants that specific value. max_wbits = min(zlib.MAX_WBITS, 15) mwb = config.get("server_max_window_bits") if mwb is not None: if mwb is True: want_config["server_max_window_bits"] = max_wbits else: want_config["server_max_window_bits"] = \ int(config.get("server_max_window_bits", max_wbits)) if not (8 <= want_config["server_max_window_bits"] <= 15): continue mwb = config.get("client_max_window_bits") if mwb is not None: if mwb is True: want_config["client_max_window_bits"] = max_wbits else: want_config["client_max_window_bits"] = \ int(config.get("client_max_window_bits", max_wbits)) if not (8 <= want_config["client_max_window_bits"] <= 15): continue return want_config return None def _format_extension_header(self, parsed_extensions): if not parsed_extensions: return None parts = [] for name, config in parsed_extensions.items(): ext_parts = [six.b(name)] for key, value in config.items(): if value is False: pass elif value is True: ext_parts.append(six.b(key)) else: ext_parts.append(six.b("%s=%s" % (key, str(value)))) parts.append(b"; ".join(ext_parts)) return b", ".join(parts) def _handle_hybi_request(self, environ): if 'eventlet.input' in environ: sock = environ['eventlet.input'].get_socket() elif 'gunicorn.socket' in environ: sock = environ['gunicorn.socket'] else: raise Exception('No eventlet.input or gunicorn.socket present in environ.') hybi_version = environ['HTTP_SEC_WEBSOCKET_VERSION'] if hybi_version not in ('8', '13', ): raise BadRequest(status='426 Upgrade Required', headers=[('Sec-WebSocket-Version', '8, 13')]) self.protocol_version = int(hybi_version) if 'HTTP_SEC_WEBSOCKET_KEY' not in environ: # That's bad. 
raise BadRequest() origin = environ.get( 'HTTP_ORIGIN', (environ.get('HTTP_SEC_WEBSOCKET_ORIGIN', '') if self.protocol_version <= 8 else '')) if self.origin_checker is not None: if not self.origin_checker(environ.get('HTTP_HOST'), origin): raise BadRequest(status='403 Forbidden') protocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', None) negotiated_protocol = None if protocols: for p in (i.strip() for i in protocols.split(',')): if p in self.supported_protocols: negotiated_protocol = p break key = environ['HTTP_SEC_WEBSOCKET_KEY'] response = base64.b64encode(sha1(six.b(key) + PROTOCOL_GUID).digest()) handshake_reply = [b"HTTP/1.1 101 Switching Protocols", b"Upgrade: websocket", b"Connection: Upgrade", b"Sec-WebSocket-Accept: " + response] if negotiated_protocol: handshake_reply.append(b"Sec-WebSocket-Protocol: " + six.b(negotiated_protocol)) parsed_extensions = {} extensions = self._parse_extension_header(environ.get("HTTP_SEC_WEBSOCKET_EXTENSIONS")) deflate = self._negotiate_permessage_deflate(extensions) if deflate is not None: parsed_extensions["permessage-deflate"] = deflate formatted_ext = self._format_extension_header(parsed_extensions) if formatted_ext is not None: handshake_reply.append(b"Sec-WebSocket-Extensions: " + formatted_ext) sock.sendall(b'\r\n'.join(handshake_reply) + b'\r\n\r\n') return RFC6455WebSocket(sock, environ, self.protocol_version, protocol=negotiated_protocol, extensions=parsed_extensions) def _extract_number(self, value): """ Utility function which, given a string like 'g98sd 5[]221@1', will return 9852211. Used to parse the Sec-WebSocket-Key headers. """ out = "" spaces = 0 for char in value: if char in string.digits: out += char elif char == " ": spaces += 1 return int(out) // spaces class WebSocket(object): """A websocket object that handles the details of serialization/deserialization to the socket. The primary way to interact with a :class:`WebSocket` object is to call :meth:`send` and :meth:`wait` in order to pass messages back and forth with the browser. Also available are the following properties: path The path value of the request. This is the same as the WSGI PATH_INFO variable, but more convenient. protocol The value of the Websocket-Protocol header. origin The value of the 'Origin' header. environ The full WSGI environment for this request. 
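A minimal echo-server sketch showing the send/wait loop (the
``WebSocketWSGI`` decorator lives in this module; the port number
below is arbitrary)::

    import eventlet
    from eventlet import websocket, wsgi

    @websocket.WebSocketWSGI
    def echo(ws):
        while True:
            message = ws.wait()
            if message is None:  # client closed the connection
                break
            ws.send(message)

    wsgi.server(eventlet.listen(('', 7000)), echo)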
""" def __init__(self, sock, environ, version=76): """ :param socket: The eventlet socket :type socket: :class:`eventlet.greenio.GreenSocket` :param environ: The wsgi environment :param version: The WebSocket spec version to follow (default is 76) """ self.log = environ.get('wsgi.errors', sys.stderr) self.log_context = 'server={shost}/{spath} client={caddr}:{cport}'.format( shost=environ.get('HTTP_HOST'), spath=environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', ''), caddr=environ.get('REMOTE_ADDR'), cport=environ.get('REMOTE_PORT'), ) self.socket = sock self.origin = environ.get('HTTP_ORIGIN') self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL') self.path = environ.get('PATH_INFO') self.environ = environ self.version = version self.websocket_closed = False self._buf = b"" self._msgs = collections.deque() self._sendlock = semaphore.Semaphore() def _pack_message(self, message): """Pack the message inside ``00`` and ``FF`` As per the dataframing section (5.3) for the websocket spec """ if isinstance(message, six.text_type): message = message.encode('utf-8') elif not isinstance(message, six.binary_type): message = six.b(str(message)) packed = b"\x00" + message + b"\xFF" return packed def _parse_messages(self): """ Parses for messages in the buffer *buf*. It is assumed that the buffer contains the start character for a message, but that it may contain only part of the rest of the message. Returns an array of messages, and the buffer remainder that didn't contain any full messages.""" msgs = [] end_idx = 0 buf = self._buf while buf: frame_type = six.indexbytes(buf, 0) if frame_type == 0: # Normal message. end_idx = buf.find(b"\xFF") if end_idx == -1: # pragma NO COVER break msgs.append(buf[1:end_idx].decode('utf-8', 'replace')) buf = buf[end_idx + 1:] elif frame_type == 255: # Closing handshake. assert six.indexbytes(buf, 1) == 0, "Unexpected closing handshake: %r" % buf self.websocket_closed = True break else: raise ValueError("Don't understand how to parse this type of message: %r" % buf) self._buf = buf return msgs def send(self, message): """Send a message to the browser. *message* should be convertable to a string; unicode objects should be encodable as utf-8. Raises socket.error with errno of 32 (broken pipe) if the socket has already been closed by the client.""" packed = self._pack_message(message) # if two greenthreads are trying to send at the same time # on the same socket, sendlock prevents interleaving and corruption self._sendlock.acquire() try: self.socket.sendall(packed) finally: self._sendlock.release() def wait(self): """Waits for and deserializes messages. Returns a single message; the oldest not yet processed. If the client has already closed the connection, returns None. This is different from normal socket behavior because the empty string is a valid websocket message.""" while not self._msgs: # Websocket might be closed already. if self.websocket_closed: return None # no parsed messages, must mean buf needs more data delta = self.socket.recv(8096) if delta == b'': return None self._buf += delta msgs = self._parse_messages() self._msgs.extend(msgs) return self._msgs.popleft() def _send_closing_frame(self, ignore_send_errors=False): """Sends the closing frame to the client, if required.""" if self.version == 76 and not self.websocket_closed: try: self.socket.sendall(b"\xff\x00") except SocketError: # Sometimes, like when the remote side cuts off the connection, # we don't care about this. 
if not ignore_send_errors: # pragma NO COVER raise self.websocket_closed = True def close(self): """Forcibly close the websocket; generally it is preferable to return from the handler method.""" try: self._send_closing_frame(True) self.socket.shutdown(True) except SocketError as e: if e.errno != errno.ENOTCONN: self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e)) finally: self.socket.close() class ConnectionClosedError(Exception): pass class FailedConnectionError(Exception): def __init__(self, status, message): super(FailedConnectionError, self).__init__(status, message) self.message = message self.status = status class ProtocolError(ValueError): pass class RFC6455WebSocket(WebSocket): def __init__(self, sock, environ, version=13, protocol=None, client=False, extensions=None): super(RFC6455WebSocket, self).__init__(sock, environ, version) self.iterator = self._iter_frames() self.client = client self.protocol = protocol self.extensions = extensions or {} self._deflate_enc = None self._deflate_dec = None class UTF8Decoder(object): def __init__(self): if utf8validator: self.validator = utf8validator.Utf8Validator() else: self.validator = None decoderclass = codecs.getincrementaldecoder('utf8') self.decoder = decoderclass() def reset(self): if self.validator: self.validator.reset() self.decoder.reset() def decode(self, data, final=False): if self.validator: valid, eocp, c_i, t_i = self.validator.validate(data) if not valid: raise ValueError('Data is not valid unicode') return self.decoder.decode(data, final) def _get_permessage_deflate_enc(self): options = self.extensions.get("permessage-deflate") if options is None: return None def _make(): return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -options.get("client_max_window_bits" if self.client else "server_max_window_bits", zlib.MAX_WBITS)) if options.get("client_no_context_takeover" if self.client else "server_no_context_takeover"): # This option means we have to make a new one every time return _make() else: if self._deflate_enc is None: self._deflate_enc = _make() return self._deflate_enc def _get_permessage_deflate_dec(self, rsv1): options = self.extensions.get("permessage-deflate") if options is None or not rsv1: return None def _make(): return zlib.decompressobj(-options.get("server_max_window_bits" if self.client else "client_max_window_bits", zlib.MAX_WBITS)) if options.get("server_no_context_takeover" if self.client else "client_no_context_takeover"): # This option means we have to make a new one every time return _make() else: if self._deflate_dec is None: self._deflate_dec = _make() return self._deflate_dec def _get_bytes(self, numbytes): data = b'' while len(data) < numbytes: d = self.socket.recv(numbytes - len(data)) if not d: raise ConnectionClosedError() data = data + d return data class Message(object): def __init__(self, opcode, decoder=None, decompressor=None): self.decoder = decoder self.data = [] self.finished = False self.opcode = opcode self.decompressor = decompressor def push(self, data, final=False): self.finished = final self.data.append(data) def getvalue(self): data = b"".join(self.data) if not self.opcode & 8 and self.decompressor: data = self.decompressor.decompress(data + b'\x00\x00\xff\xff') if self.decoder: data = self.decoder.decode(data, self.finished) return data @staticmethod def _apply_mask(data, mask, length=None, offset=0): if length is None: length = len(data) cnt = range(length) return b''.join(six.int2byte(six.indexbytes(data, i) ^ mask[(offset + i) % 
4]) for i in cnt) def _handle_control_frame(self, opcode, data): if opcode == 8: # connection close if not data: status = 1000 elif len(data) > 1: status = struct.unpack_from('!H', data)[0] if not status or status not in VALID_CLOSE_STATUS: raise FailedConnectionError( 1002, "Unexpected close status code.") try: data = self.UTF8Decoder().decode(data[2:], True) except (UnicodeDecodeError, ValueError): raise FailedConnectionError( 1002, "Close message data should be valid UTF-8.") else: status = 1002 self.close(close_data=(status, '')) raise ConnectionClosedError() elif opcode == 9: # ping self.send(data, control_code=0xA) elif opcode == 0xA: # pong pass else: raise FailedConnectionError( 1002, "Unknown control frame received.") def _iter_frames(self): fragmented_message = None try: while True: message = self._recv_frame(message=fragmented_message) if message.opcode & 8: self._handle_control_frame( message.opcode, message.getvalue()) continue if fragmented_message and message is not fragmented_message: raise RuntimeError('Unexpected message change.') fragmented_message = message if message.finished: data = fragmented_message.getvalue() fragmented_message = None yield data except FailedConnectionError: exc_typ, exc_val, exc_tb = sys.exc_info() self.close(close_data=(exc_val.status, exc_val.message)) except ConnectionClosedError: return except Exception: self.close(close_data=(1011, 'Internal Server Error')) raise def _recv_frame(self, message=None): recv = self._get_bytes # Unpacking the frame described in Section 5.2 of RFC6455 # (https://tools.ietf.org/html/rfc6455#section-5.2) header = recv(2) a, b = struct.unpack('!BB', header) finished = a >> 7 == 1 rsv123 = a >> 4 & 7 rsv1 = rsv123 & 4 if rsv123: if rsv1 and "permessage-deflate" not in self.extensions: # must be zero - unless it's compressed then rsv1 is true raise FailedConnectionError( 1002, "RSV1, RSV2, RSV3: MUST be 0 unless an extension is" " negotiated that defines meanings for non-zero values.") opcode = a & 15 if opcode not in (0, 1, 2, 8, 9, 0xA): raise FailedConnectionError(1002, "Unknown opcode received.") masked = b & 128 == 128 if not masked and not self.client: raise FailedConnectionError(1002, "A client MUST mask all frames" " that it sends to the server") length = b & 127 if opcode & 8: if not finished: raise FailedConnectionError(1002, "Control frames must not" " be fragmented.") if length > 125: raise FailedConnectionError( 1002, "All control frames MUST have a payload length of 125" " bytes or less") elif opcode and message: raise FailedConnectionError( 1002, "Received a non-continuation opcode within" " fragmented message.") elif not opcode and not message: raise FailedConnectionError( 1002, "Received continuation opcode with no previous" " fragments received.") if length == 126: length = struct.unpack('!H', recv(2))[0] elif length == 127: length = struct.unpack('!Q', recv(8))[0] if masked: mask = struct.unpack('!BBBB', recv(4)) received = 0 if not message or opcode & 8: decoder = self.UTF8Decoder() if opcode == 1 else None decompressor = self._get_permessage_deflate_dec(rsv1) message = self.Message(opcode, decoder=decoder, decompressor=decompressor) if not length: message.push(b'', final=finished) else: while received < length: d = self.socket.recv(length - received) if not d: raise ConnectionClosedError() dlen = len(d) if masked: d = self._apply_mask(d, mask, length=dlen, offset=received) received = received + dlen try: message.push(d, final=finished) except (UnicodeDecodeError, ValueError): raise 
FailedConnectionError( 1007, "Text data must be valid utf-8") return message def _pack_message(self, message, masked=False, continuation=False, final=True, control_code=None): is_text = False if isinstance(message, six.text_type): message = message.encode('utf-8') is_text = True compress_bit = 0 compressor = self._get_permessage_deflate_enc() if message and compressor: message = compressor.compress(message) message += compressor.flush(zlib.Z_SYNC_FLUSH) assert message[-4:] == b"\x00\x00\xff\xff" message = message[:-4] compress_bit = 1 << 6 length = len(message) if not length: # no point masking empty data masked = False if control_code: if control_code not in (8, 9, 0xA): raise ProtocolError('Unknown control opcode.') if continuation or not final: raise ProtocolError('Control frame cannot be a fragment.') if length > 125: raise ProtocolError('Control frame data too large (>125).') header = struct.pack('!B', control_code | 1 << 7) else: opcode = 0 if continuation else ((1 if is_text else 2) | compress_bit) header = struct.pack('!B', opcode | (1 << 7 if final else 0)) lengthdata = 1 << 7 if masked else 0 if length > 65535: lengthdata = struct.pack('!BQ', lengthdata | 127, length) elif length > 125: lengthdata = struct.pack('!BH', lengthdata | 126, length) else: lengthdata = struct.pack('!B', lengthdata | length) if masked: # NOTE: RFC6455 states: # A server MUST NOT mask any frames that it sends to the client rand = Random(time.time()) mask = [rand.getrandbits(8) for _ in six.moves.xrange(4)] message = RFC6455WebSocket._apply_mask(message, mask, length) maskdata = struct.pack('!BBBB', *mask) else: maskdata = b'' return b''.join((header, lengthdata, maskdata, message)) def wait(self): for i in self.iterator: return i def _send(self, frame): self._sendlock.acquire() try: self.socket.sendall(frame) finally: self._sendlock.release() def send(self, message, **kw): kw['masked'] = self.client payload = self._pack_message(message, **kw) self._send(payload) def _send_closing_frame(self, ignore_send_errors=False, close_data=None): if self.version in (8, 13) and not self.websocket_closed: if close_data is not None: status, msg = close_data if isinstance(msg, six.text_type): msg = msg.encode('utf-8') data = struct.pack('!H', status) + msg else: data = '' try: self.send(data, control_code=8) except SocketError: # Sometimes, like when the remote side cuts off the connection, # we don't care about this. 
if not ignore_send_errors: # pragma NO COVER raise self.websocket_closed = True def close(self, close_data=None): """Forcibly close the websocket; generally it is preferable to return from the handler method.""" try: self._send_closing_frame(close_data=close_data, ignore_send_errors=True) self.socket.shutdown(socket.SHUT_WR) except SocketError as e: if e.errno != errno.ENOTCONN: self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e)) finally: self.socket.close() eventlet-0.30.2/eventlet/wsgi.py0000644000076500000240000011172414006212666017236 0ustar temotostaff00000000000000import errno import os import sys import time import traceback import types import warnings import eventlet from eventlet import greenio from eventlet import support from eventlet.green import BaseHTTPServer from eventlet.green import socket import six from six.moves import urllib DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024 DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1' MAX_REQUEST_LINE = 8192 MAX_HEADER_LINE = 8192 MAX_TOTAL_HEADER_SIZE = 65536 MINIMUM_CHUNK_SIZE = 4096 # %(client_port)s is also available DEFAULT_LOG_FORMAT = ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"' ' %(status_code)s %(body_length)s %(wall_seconds).6f') RESPONSE_414 = b'''HTTP/1.0 414 Request URI Too Long\r\n\ Connection: close\r\n\ Content-Length: 0\r\n\r\n''' is_accepting = True STATE_IDLE = 'idle' STATE_REQUEST = 'request' STATE_CLOSE = 'close' __all__ = ['server', 'format_date_time'] # Weekday and month names for HTTP date/time formatting; always English! _weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] _monthname = [None, # Dummy so we can use 1-based month numbers "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] def format_date_time(timestamp): """Formats a unix timestamp into an HTTP standard string.""" year, month, day, hh, mm, ss, wd, _y, _z = time.gmtime(timestamp) return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( _weekdayname[wd], day, _monthname[month], year, hh, mm, ss ) def addr_to_host_port(addr): host = 'unix' port = '' if isinstance(addr, tuple): host = addr[0] port = addr[1] return (host, port) # Collections of error codes to compare against. Not all attributes are set # on errno module on all platforms, so some are literals :( BAD_SOCK = set((errno.EBADF, 10053)) BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET)) class ChunkReadError(ValueError): pass # special flag return value for apps class _AlreadyHandled(object): def __iter__(self): return self def next(self): raise StopIteration __next__ = next ALREADY_HANDLED = _AlreadyHandled() class Input(object): def __init__(self, rfile, content_length, sock, wfile=None, wfile_line=None, chunked_input=False): self.rfile = rfile self._sock = sock if content_length is not None: content_length = int(content_length) self.content_length = content_length self.wfile = wfile self.wfile_line = wfile_line self.position = 0 self.chunked_input = chunked_input self.chunk_length = -1 # (optional) headers to send with a "100 Continue" response. 
Set by # calling set_hundred_continue_respose_headers() on env['wsgi.input'] self.hundred_continue_headers = None self.is_hundred_continue_response_sent = False # handle_one_response should give us a ref to the response state so we # know whether we can still send the 100 Continue; until then, though, # we're flying blind self.headers_sent = None def send_hundred_continue_response(self): if self.headers_sent: # To late; application has already started sending data back # to the client # TODO: maybe log a warning if self.hundred_continue_headers # is not None? return towrite = [] # 100 Continue status line towrite.append(self.wfile_line) # Optional headers if self.hundred_continue_headers is not None: # 100 Continue headers for header in self.hundred_continue_headers: towrite.append(six.b('%s: %s\r\n' % header)) # Blank line towrite.append(b'\r\n') self.wfile.writelines(towrite) self.wfile.flush() # Reinitialize chunk_length (expect more data) self.chunk_length = -1 def _do_read(self, reader, length=None): if self.wfile is not None and not self.is_hundred_continue_response_sent: # 100 Continue response self.send_hundred_continue_response() self.is_hundred_continue_response_sent = True if (self.content_length is not None) and ( length is None or length > self.content_length - self.position): length = self.content_length - self.position if not length: return b'' try: read = reader(length) except greenio.SSL.ZeroReturnError: read = b'' self.position += len(read) return read def _chunked_read(self, rfile, length=None, use_readline=False): if self.wfile is not None and not self.is_hundred_continue_response_sent: # 100 Continue response self.send_hundred_continue_response() self.is_hundred_continue_response_sent = True try: if length == 0: return b"" if length and length < 0: length = None if use_readline: reader = self.rfile.readline else: reader = self.rfile.read response = [] while self.chunk_length != 0: maxreadlen = self.chunk_length - self.position if length is not None and length < maxreadlen: maxreadlen = length if maxreadlen > 0: data = reader(maxreadlen) if not data: self.chunk_length = 0 raise IOError("unexpected end of file while parsing chunked data") datalen = len(data) response.append(data) self.position += datalen if self.chunk_length == self.position: rfile.readline() if length is not None: length -= datalen if length == 0: break if use_readline and data[-1:] == b"\n": break else: try: self.chunk_length = int(rfile.readline().split(b";", 1)[0], 16) except ValueError as err: raise ChunkReadError(err) self.position = 0 if self.chunk_length == 0: rfile.readline() except greenio.SSL.ZeroReturnError: pass return b''.join(response) def read(self, length=None): if self.chunked_input: return self._chunked_read(self.rfile, length) return self._do_read(self.rfile.read, length) def readline(self, size=None): if self.chunked_input: return self._chunked_read(self.rfile, size, True) else: return self._do_read(self.rfile.readline, size) def readlines(self, hint=None): if self.chunked_input: lines = [] for line in iter(self.readline, b''): lines.append(line) if hint and hint > 0: hint -= len(line) if hint <= 0: break return lines else: return self._do_read(self.rfile.readlines, hint) def __iter__(self): return iter(self.read, b'') def get_socket(self): return self._sock def set_hundred_continue_response_headers(self, headers, capitalize_response_headers=True): # Response headers capitalization (default) # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN # Per HTTP RFC standard, header name 
is case-insensitive. # Please, fix your client to ignore header case if possible. if capitalize_response_headers: headers = [ ('-'.join([x.capitalize() for x in key.split('-')]), value) for key, value in headers] self.hundred_continue_headers = headers def discard(self, buffer_size=16 << 10): while self.read(buffer_size): pass class HeaderLineTooLong(Exception): pass class HeadersTooLarge(Exception): pass def get_logger(log, debug): if callable(getattr(log, 'info', None)) \ and callable(getattr(log, 'debug', None)): return log else: return LoggerFileWrapper(log or sys.stderr, debug) class LoggerNull(object): def __init__(self): pass def error(self, msg, *args, **kwargs): pass def info(self, msg, *args, **kwargs): pass def debug(self, msg, *args, **kwargs): pass def write(self, msg, *args): pass class LoggerFileWrapper(LoggerNull): def __init__(self, log, debug): self.log = log self._debug = debug def error(self, msg, *args, **kwargs): self.write(msg, *args) def info(self, msg, *args, **kwargs): self.write(msg, *args) def debug(self, msg, *args, **kwargs): if self._debug: self.write(msg, *args) def write(self, msg, *args): msg = msg + '\n' if args: msg = msg % args self.log.write(msg) class FileObjectForHeaders(object): def __init__(self, fp): self.fp = fp self.total_header_size = 0 def readline(self, size=-1): sz = size if size < 0: sz = MAX_HEADER_LINE rv = self.fp.readline(sz) if len(rv) >= MAX_HEADER_LINE: raise HeaderLineTooLong() self.total_header_size += len(rv) if self.total_header_size > MAX_TOTAL_HEADER_SIZE: raise HeadersTooLarge() return rv class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): protocol_version = 'HTTP/1.1' minimum_chunk_size = MINIMUM_CHUNK_SIZE capitalize_response_headers = True # https://github.com/eventlet/eventlet/issues/295 # Stdlib default is 0 (unbuffered), but then `wfile.writelines()` looses data # so before going back to unbuffered, remove any usage of `writelines`. 
wbufsize = 16 << 10 def __init__(self, conn_state, server): self.request = conn_state[1] self.client_address = conn_state[0] self.conn_state = conn_state self.server = server self.setup() try: self.handle() finally: self.finish() def setup(self): # overriding SocketServer.setup to correctly handle SSL.Connection objects conn = self.connection = self.request # TCP_QUICKACK is a better alternative to disabling Nagle's algorithm # https://news.ycombinator.com/item?id=10607422 if getattr(socket, 'TCP_QUICKACK', None): try: conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, True) except socket.error: pass try: self.rfile = conn.makefile('rb', self.rbufsize) self.wfile = conn.makefile('wb', self.wbufsize) except (AttributeError, NotImplementedError): if hasattr(conn, 'send') and hasattr(conn, 'recv'): # it's an SSL.Connection self.rfile = socket._fileobject(conn, "rb", self.rbufsize) self.wfile = socket._fileobject(conn, "wb", self.wbufsize) else: # it's a SSLObject, or a martian raise NotImplementedError( '''eventlet.wsgi doesn't support sockets of type {0}'''.format(type(conn))) def handle(self): self.close_connection = True while True: self.handle_one_request() if self.conn_state[2] == STATE_CLOSE: self.close_connection = 1 if self.close_connection: break def _read_request_line(self): if self.rfile.closed: self.close_connection = 1 return '' try: return self.rfile.readline(self.server.url_length_limit) except greenio.SSL.ZeroReturnError: pass except socket.error as e: last_errno = support.get_errno(e) if last_errno in BROKEN_SOCK: self.server.log.debug('({0}) connection reset by peer {1!r}'.format( self.server.pid, self.client_address)) elif last_errno not in BAD_SOCK: raise return '' def handle_one_request(self): if self.server.max_http_version: self.protocol_version = self.server.max_http_version self.raw_requestline = self._read_request_line() if not self.raw_requestline: self.close_connection = 1 return if len(self.raw_requestline) >= self.server.url_length_limit: self.wfile.write(RESPONSE_414) self.close_connection = 1 return orig_rfile = self.rfile try: self.rfile = FileObjectForHeaders(self.rfile) if not self.parse_request(): return except HeaderLineTooLong: self.wfile.write( b"HTTP/1.0 400 Header Line Too Long\r\n" b"Connection: close\r\nContent-length: 0\r\n\r\n") self.close_connection = 1 return except HeadersTooLarge: self.wfile.write( b"HTTP/1.0 400 Headers Too Large\r\n" b"Connection: close\r\nContent-length: 0\r\n\r\n") self.close_connection = 1 return finally: self.rfile = orig_rfile content_length = self.headers.get('content-length') if content_length is not None: try: if int(content_length) < 0: raise ValueError except ValueError: # Negative, or not an int at all self.wfile.write( b"HTTP/1.0 400 Bad Request\r\n" b"Connection: close\r\nContent-length: 0\r\n\r\n") self.close_connection = 1 return self.environ = self.get_environ() self.application = self.server.app try: self.server.outstanding_requests += 1 try: self.handle_one_response() except socket.error as e: # Broken pipe, connection reset by peer if support.get_errno(e) not in BROKEN_SOCK: raise finally: self.server.outstanding_requests -= 1 def handle_one_response(self): start = time.time() headers_set = [] headers_sent = [] # Push the headers-sent state into the Input so it won't send a # 100 Continue response if we've already started a response. 
self.environ['wsgi.input'].headers_sent = headers_sent wfile = self.wfile result = None use_chunked = [False] length = [0] status_code = [200] def write(data): towrite = [] if not headers_set: raise AssertionError("write() before start_response()") elif not headers_sent: status, response_headers = headers_set headers_sent.append(1) header_list = [header[0].lower() for header in response_headers] towrite.append(six.b('%s %s\r\n' % (self.protocol_version, status))) for header in response_headers: towrite.append(six.b('%s: %s\r\n' % header)) # send Date header? if 'date' not in header_list: towrite.append(six.b('Date: %s\r\n' % (format_date_time(time.time()),))) client_conn = self.headers.get('Connection', '').lower() send_keep_alive = False if self.close_connection == 0 and \ self.server.keepalive and (client_conn == 'keep-alive' or (self.request_version == 'HTTP/1.1' and not client_conn == 'close')): # only send keep-alives back to clients that sent them, # it's redundant for 1.1 connections send_keep_alive = (client_conn == 'keep-alive') self.close_connection = 0 else: self.close_connection = 1 if 'content-length' not in header_list: if self.request_version == 'HTTP/1.1': use_chunked[0] = True towrite.append(b'Transfer-Encoding: chunked\r\n') elif 'content-length' not in header_list: # client is 1.0 and therefore must read to EOF self.close_connection = 1 if self.close_connection: towrite.append(b'Connection: close\r\n') elif send_keep_alive: towrite.append(b'Connection: keep-alive\r\n') towrite.append(b'\r\n') # end of header writing if use_chunked[0]: # Write the chunked encoding towrite.append(six.b("%x" % (len(data),)) + b"\r\n" + data + b"\r\n") else: towrite.append(data) wfile.writelines(towrite) wfile.flush() length[0] = length[0] + sum(map(len, towrite)) def start_response(status, response_headers, exc_info=None): status_code[0] = status.split()[0] if exc_info: try: if headers_sent: # Re-raise original exception if headers sent six.reraise(exc_info[0], exc_info[1], exc_info[2]) finally: # Avoid dangling circular ref exc_info = None # Response headers capitalization # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN # Per HTTP RFC standard, header name is case-insensitive. # Please, fix your client to ignore header case if possible. 
if self.capitalize_response_headers: if six.PY2: def cap(x): return x.capitalize() else: def cap(x): return x.encode('latin1').capitalize().decode('latin1') response_headers = [ ('-'.join([cap(x) for x in key.split('-')]), value) for key, value in response_headers] headers_set[:] = [status, response_headers] return write try: try: result = self.application(self.environ, start_response) if (isinstance(result, _AlreadyHandled) or isinstance(getattr(result, '_obj', None), _AlreadyHandled)): self.close_connection = 1 return # Set content-length if possible if not headers_sent and hasattr(result, '__len__') and \ 'Content-Length' not in [h for h, _v in headers_set[1]]: headers_set[1].append(('Content-Length', str(sum(map(len, result))))) towrite = [] towrite_size = 0 just_written_size = 0 minimum_write_chunk_size = int(self.environ.get( 'eventlet.minimum_write_chunk_size', self.minimum_chunk_size)) for data in result: if len(data) == 0: continue if isinstance(data, six.text_type): data = data.encode('ascii') towrite.append(data) towrite_size += len(data) if towrite_size >= minimum_write_chunk_size: write(b''.join(towrite)) towrite = [] just_written_size = towrite_size towrite_size = 0 if towrite: just_written_size = towrite_size write(b''.join(towrite)) if not headers_sent or (use_chunked[0] and just_written_size): write(b'') except Exception: self.close_connection = 1 tb = traceback.format_exc() self.server.log.info(tb) if not headers_sent: err_body = six.b(tb) if self.server.debug else b'' start_response("500 Internal Server Error", [('Content-type', 'text/plain'), ('Content-length', len(err_body))]) write(err_body) finally: if hasattr(result, 'close'): result.close() request_input = self.environ['eventlet.input'] if (request_input.chunked_input or request_input.position < (request_input.content_length or 0)): # Read and discard body if there was no pending 100-continue if not request_input.wfile and self.close_connection == 0: try: request_input.discard() except ChunkReadError as e: self.close_connection = 1 self.server.log.error(( 'chunked encoding error while discarding request body.' + ' client={0} request="{1}" error="{2}"').format( self.get_client_address()[0], self.requestline, e, )) except IOError as e: self.close_connection = 1 self.server.log.error(( 'I/O error while discarding request body.' 
+ ' client={0} request="{1}" error="{2}"').format( self.get_client_address()[0], self.requestline, e, )) finish = time.time() for hook, args, kwargs in self.environ['eventlet.posthooks']: hook(self.environ, *args, **kwargs) if self.server.log_output: client_host, client_port = self.get_client_address() self.server.log.info(self.server.log_format % { 'client_ip': client_host, 'client_port': client_port, 'date_time': self.log_date_time_string(), 'request_line': self.requestline, 'status_code': status_code[0], 'body_length': length[0], 'wall_seconds': finish - start, }) def get_client_address(self): host, port = addr_to_host_port(self.client_address) if self.server.log_x_forwarded_for: forward = self.headers.get('X-Forwarded-For', '').replace(' ', '') if forward: host = forward + ',' + host return (host, port) def get_environ(self): env = self.server.get_environ() env['REQUEST_METHOD'] = self.command env['SCRIPT_NAME'] = '' pq = self.path.split('?', 1) env['RAW_PATH_INFO'] = pq[0] if six.PY2: env['PATH_INFO'] = urllib.parse.unquote(pq[0]) else: env['PATH_INFO'] = urllib.parse.unquote(pq[0], encoding='latin1') if len(pq) > 1: env['QUERY_STRING'] = pq[1] ct = self.headers.get('content-type') if ct is None: try: ct = self.headers.type except AttributeError: ct = self.headers.get_content_type() env['CONTENT_TYPE'] = ct length = self.headers.get('content-length') if length: env['CONTENT_LENGTH'] = length env['SERVER_PROTOCOL'] = 'HTTP/1.0' sockname = self.request.getsockname() server_addr = addr_to_host_port(sockname) env['SERVER_NAME'] = server_addr[0] env['SERVER_PORT'] = str(server_addr[1]) client_addr = addr_to_host_port(self.client_address) env['REMOTE_ADDR'] = client_addr[0] env['REMOTE_PORT'] = str(client_addr[1]) env['GATEWAY_INTERFACE'] = 'CGI/1.1' try: headers = self.headers.headers except AttributeError: headers = self.headers._headers else: headers = [h.split(':', 1) for h in headers] env['headers_raw'] = headers_raw = tuple((k, v.strip(' \t\n\r')) for k, v in headers) for k, v in headers_raw: k = k.replace('-', '_').upper() if k in ('CONTENT_TYPE', 'CONTENT_LENGTH'): # These do not get the HTTP_ prefix and were handled above continue envk = 'HTTP_' + k if envk in env: env[envk] += ',' + v else: env[envk] = v if env.get('HTTP_EXPECT', '').lower() == '100-continue': wfile = self.wfile wfile_line = b'HTTP/1.1 100 Continue\r\n' else: wfile = None wfile_line = None chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked' env['wsgi.input'] = env['eventlet.input'] = Input( self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line, chunked_input=chunked) env['eventlet.posthooks'] = [] return env def finish(self): try: BaseHTTPServer.BaseHTTPRequestHandler.finish(self) except socket.error as e: # Broken pipe, connection reset by peer if support.get_errno(e) not in BROKEN_SOCK: raise greenio.shutdown_safe(self.connection) self.connection.close() def handle_expect_100(self): return True class Server(BaseHTTPServer.HTTPServer): def __init__(self, socket, address, app, log=None, environ=None, max_http_version=None, protocol=HttpProtocol, minimum_chunk_size=None, log_x_forwarded_for=True, keepalive=True, log_output=True, log_format=DEFAULT_LOG_FORMAT, url_length_limit=MAX_REQUEST_LINE, debug=True, socket_timeout=None, capitalize_response_headers=True): self.outstanding_requests = 0 self.socket = socket self.address = address self.log = LoggerNull() if log_output: self.log = get_logger(log, debug) self.app = app self.keepalive = keepalive self.environ = environ 
self.max_http_version = max_http_version self.protocol = protocol self.pid = os.getpid() self.minimum_chunk_size = minimum_chunk_size self.log_x_forwarded_for = log_x_forwarded_for self.log_output = log_output self.log_format = log_format self.url_length_limit = url_length_limit self.debug = debug self.socket_timeout = socket_timeout self.capitalize_response_headers = capitalize_response_headers if not self.capitalize_response_headers: warnings.warn("""capitalize_response_headers is disabled. Please, make sure you know what you are doing. HTTP headers names are case-insensitive per RFC standard. Most likely, you need to fix HTTP parsing in your client software.""", DeprecationWarning, stacklevel=3) def get_environ(self): d = { 'wsgi.errors': sys.stderr, 'wsgi.version': (1, 0), 'wsgi.multithread': True, 'wsgi.multiprocess': False, 'wsgi.run_once': False, 'wsgi.url_scheme': 'http', } # detect secure socket if hasattr(self.socket, 'do_handshake'): d['wsgi.url_scheme'] = 'https' d['HTTPS'] = 'on' if self.environ is not None: d.update(self.environ) return d def process_request(self, conn_state): # The actual request handling takes place in __init__, so we need to # set minimum_chunk_size before __init__ executes and we don't want to modify # class variable proto = new(self.protocol) if self.minimum_chunk_size is not None: proto.minimum_chunk_size = self.minimum_chunk_size proto.capitalize_response_headers = self.capitalize_response_headers try: proto.__init__(conn_state, self) except socket.timeout: # Expected exceptions are not exceptional conn_state[1].close() # similar to logging "accepted" in server() self.log.debug('({0}) timed out {1!r}'.format(self.pid, conn_state[0])) def log_message(self, message): raise AttributeError('''\ eventlet.wsgi.server.log_message was deprecated and deleted. Please use server.log.info instead.''') try: new = types.InstanceType except AttributeError: new = lambda cls: cls.__new__(cls) try: import ssl ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError) ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET, ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL)) except ImportError: ACCEPT_EXCEPTIONS = (socket.error,) ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET)) def socket_repr(sock): scheme = 'http' if hasattr(sock, 'do_handshake'): scheme = 'https' name = sock.getsockname() if sock.family == socket.AF_INET: hier_part = '//{0}:{1}'.format(*name) elif sock.family == socket.AF_INET6: hier_part = '//[{0}]:{1}'.format(*name[:2]) elif sock.family == socket.AF_UNIX: hier_part = name else: hier_part = repr(name) return scheme + ':' + hier_part def server(sock, site, log=None, environ=None, max_size=None, max_http_version=DEFAULT_MAX_HTTP_VERSION, protocol=HttpProtocol, server_event=None, minimum_chunk_size=None, log_x_forwarded_for=True, custom_pool=None, keepalive=True, log_output=True, log_format=DEFAULT_LOG_FORMAT, url_length_limit=MAX_REQUEST_LINE, debug=True, socket_timeout=None, capitalize_response_headers=True): """Start up a WSGI server handling requests from the supplied server socket. This function loops forever. The *sock* object will be closed after server exits, but the underlying file descriptor will remain open, so if you have a dup() of *sock*, it will remain usable. .. warning:: At the moment :func:`server` will always wait for active connections to finish before exiting, even if there's an exception raised inside it (*all* exceptions are handled the same way, including :class:`greenlet.GreenletExit` and those inheriting from `BaseException`). 
While this may not be an issue normally, when it comes to long running HTTP connections (like :mod:`eventlet.websocket`) it will become problematic and calling :meth:`~eventlet.greenthread.GreenThread.wait` on a thread that runs the server may hang, even after using :meth:`~eventlet.greenthread.GreenThread.kill`, as long as there are active connections. :param sock: Server socket, must be already bound to a port and listening. :param site: WSGI application function. :param log: logging.Logger instance or file-like object that logs should be written to. If a Logger instance is supplied, messages are sent to the INFO log level. If not specified, sys.stderr is used. :param environ: Additional parameters that go into the environ dictionary of every request. :param max_size: Maximum number of client connections opened at any time by this server. Default is 1024. :param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0. This can help with applications or clients that don't behave properly using HTTP 1.1. :param protocol: Protocol class. Deprecated. :param server_event: Used to collect the Server object. Deprecated. :param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve performance of applications which yield many small strings, though using it technically violates the WSGI spec. This can be overridden on a per request basis by setting environ['eventlet.minimum_write_chunk_size']. :param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for header in addition to the actual client ip address in the 'client_ip' field of the log line. :param custom_pool: A custom GreenPool instance which is used to spawn client green threads. If this is supplied, max_size is ignored. :param keepalive: If set to False, disables keepalives on the server; all connections will be closed after serving one request. :param log_output: A Boolean indicating if the server will log data or not. :param log_format: A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds. The default is a good example of how to use it. :param url_length_limit: A maximum allowed length of the request url. If exceeded, 414 error is returned. :param debug: True if the server should send exception tracebacks to the clients on 500 errors. If False, the server will respond with empty bodies. :param socket_timeout: Timeout for client connections' socket operations. Default None means wait forever. :param capitalize_response_headers: Normalize response headers' names to Foo-Bar. Default is True. 
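A minimal, runnable sketch of the typical call (the port below is
arbitrary)::

    import eventlet
    from eventlet import wsgi

    def hello_world(env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello, World!\r\n']

    wsgi.server(eventlet.listen(('', 8090)), hello_world)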
""" serv = Server( sock, sock.getsockname(), site, log, environ=environ, max_http_version=max_http_version, protocol=protocol, minimum_chunk_size=minimum_chunk_size, log_x_forwarded_for=log_x_forwarded_for, keepalive=keepalive, log_output=log_output, log_format=log_format, url_length_limit=url_length_limit, debug=debug, socket_timeout=socket_timeout, capitalize_response_headers=capitalize_response_headers, ) if server_event is not None: warnings.warn( 'eventlet.wsgi.Server() server_event kwarg is deprecated and will be removed soon', DeprecationWarning, stacklevel=2) server_event.send(serv) if max_size is None: max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS if custom_pool is not None: pool = custom_pool else: pool = eventlet.GreenPool(max_size) if not (hasattr(pool, 'spawn') and hasattr(pool, 'waitall')): raise AttributeError('''\ eventlet.wsgi.Server pool must provide methods: `spawn`, `waitall`. If unsure, use eventlet.GreenPool.''') # [addr, socket, state] connections = {} def _clean_connection(_, conn): connections.pop(conn[0], None) conn[2] = STATE_CLOSE greenio.shutdown_safe(conn[1]) conn[1].close() try: serv.log.info('({0}) wsgi starting up on {1}'.format(serv.pid, socket_repr(sock))) while is_accepting: try: client_socket, client_addr = sock.accept() client_socket.settimeout(serv.socket_timeout) serv.log.debug('({0}) accepted {1!r}'.format(serv.pid, client_addr)) connections[client_addr] = connection = [client_addr, client_socket, STATE_IDLE] (pool.spawn(serv.process_request, connection) .link(_clean_connection, connection)) except ACCEPT_EXCEPTIONS as e: if support.get_errno(e) not in ACCEPT_ERRNO: raise except (KeyboardInterrupt, SystemExit): serv.log.info('wsgi exiting') break finally: for cs in six.itervalues(connections): prev_state = cs[2] cs[2] = STATE_CLOSE if prev_state == STATE_IDLE: greenio.shutdown_safe(cs[1]) pool.waitall() serv.log.info('({0}) wsgi exited, is_accepting={1}'.format(serv.pid, is_accepting)) try: # NOTE: It's not clear whether we want this to leave the # socket open or close it. Use cases like Spawning want # the underlying fd to remain open, but if we're going # that far we might as well not bother closing sock at # all. 
sock.close() except socket.error as e: if support.get_errno(e) not in BROKEN_SOCK: traceback.print_exc() eventlet-0.30.2/eventlet/zipkin/0000755000076500000240000000000014017673044017214 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/zipkin/__init__.py0000644000076500000240000000000014006212666021310 0ustar temotostaff00000000000000eventlet-0.30.2/eventlet/zipkin/_thrift/0000755000076500000240000000000014017673044020653 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/zipkin/_thrift/__init__.py0000644000076500000240000000000014006212666022747 0ustar temotostaff00000000000000eventlet-0.30.2/eventlet/zipkin/_thrift/zipkinCore/0000755000076500000240000000000014017673044022770 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet/zipkin/_thrift/zipkinCore/__init__.py0000644000076500000240000000004214006212666025072 0ustar temotostaff00000000000000__all__ = ['ttypes', 'constants'] eventlet-0.30.2/eventlet/zipkin/_thrift/zipkinCore/constants.py0000644000076500000240000000042314006212666025352 0ustar temotostaff00000000000000# # Autogenerated by Thrift Compiler (0.8.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # from thrift.Thrift import TType, TMessageType, TException from ttypes import * CLIENT_SEND = "cs" CLIENT_RECV = "cr" SERVER_SEND = "ss" SERVER_RECV = "sr" eventlet-0.30.2/eventlet/zipkin/_thrift/zipkinCore/ttypes.py0000644000076500000240000003227114006212666024674 0ustar temotostaff00000000000000# # Autogenerated by Thrift Compiler (0.8.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # from thrift.Thrift import TType, TMessageType, TException from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class AnnotationType: BOOL = 0 BYTES = 1 I16 = 2 I32 = 3 I64 = 4 DOUBLE = 5 STRING = 6 _VALUES_TO_NAMES = { 0: "BOOL", 1: "BYTES", 2: "I16", 3: "I32", 4: "I64", 5: "DOUBLE", 6: "STRING", } _NAMES_TO_VALUES = { "BOOL": 0, "BYTES": 1, "I16": 2, "I32": 3, "I64": 4, "DOUBLE": 5, "STRING": 6, } class Endpoint: """ Attributes: - ipv4 - port - service_name """ thrift_spec = ( None, # 0 (1, TType.I32, 'ipv4', None, None, ), # 1 (2, TType.I16, 'port', None, None, ), # 2 (3, TType.STRING, 'service_name', None, None, ), # 3 ) def __init__(self, ipv4=None, port=None, service_name=None,): self.ipv4 = ipv4 self.port = port self.service_name = service_name def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.ipv4 = iprot.readI32(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I16: self.port = iprot.readI16(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.service_name = iprot.readString(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Endpoint') if self.ipv4 is not None: oprot.writeFieldBegin('ipv4', 
TType.I32, 1) oprot.writeI32(self.ipv4) oprot.writeFieldEnd() if self.port is not None: oprot.writeFieldBegin('port', TType.I16, 2) oprot.writeI16(self.port) oprot.writeFieldEnd() if self.service_name is not None: oprot.writeFieldBegin('service_name', TType.STRING, 3) oprot.writeString(self.service_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Annotation: """ Attributes: - timestamp - value - host """ thrift_spec = ( None, # 0 (1, TType.I64, 'timestamp', None, None, ), # 1 (2, TType.STRING, 'value', None, None, ), # 2 (3, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 3 ) def __init__(self, timestamp=None, value=None, host=None,): self.timestamp = timestamp self.value = value self.host = host def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I64: self.timestamp = iprot.readI64(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.value = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.host = Endpoint() self.host.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Annotation') if self.timestamp is not None: oprot.writeFieldBegin('timestamp', TType.I64, 1) oprot.writeI64(self.timestamp) oprot.writeFieldEnd() if self.value is not None: oprot.writeFieldBegin('value', TType.STRING, 2) oprot.writeString(self.value) oprot.writeFieldEnd() if self.host is not None: oprot.writeFieldBegin('host', TType.STRUCT, 3) self.host.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class BinaryAnnotation: """ Attributes: - key - value - annotation_type - host """ thrift_spec = ( None, # 0 (1, TType.STRING, 'key', None, None, ), # 1 (2, TType.STRING, 'value', None, None, ), # 2 (3, TType.I32, 'annotation_type', None, None, ), # 3 (4, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 4 ) def __init__(self, key=None, value=None, annotation_type=None, host=None,): self.key = key self.value = value self.annotation_type = annotation_type self.host = host def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and 
self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.key = iprot.readString(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.value = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.annotation_type = iprot.readI32(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.host = Endpoint() self.host.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('BinaryAnnotation') if self.key is not None: oprot.writeFieldBegin('key', TType.STRING, 1) oprot.writeString(self.key) oprot.writeFieldEnd() if self.value is not None: oprot.writeFieldBegin('value', TType.STRING, 2) oprot.writeString(self.value) oprot.writeFieldEnd() if self.annotation_type is not None: oprot.writeFieldBegin('annotation_type', TType.I32, 3) oprot.writeI32(self.annotation_type) oprot.writeFieldEnd() if self.host is not None: oprot.writeFieldBegin('host', TType.STRUCT, 4) self.host.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Span: """ Attributes: - trace_id - name - id - parent_id - annotations - binary_annotations """ thrift_spec = ( None, # 0 (1, TType.I64, 'trace_id', None, None, ), # 1 None, # 2 (3, TType.STRING, 'name', None, None, ), # 3 (4, TType.I64, 'id', None, None, ), # 4 (5, TType.I64, 'parent_id', None, None, ), # 5 (6, TType.LIST, 'annotations', (TType.STRUCT,(Annotation, Annotation.thrift_spec)), None, ), # 6 None, # 7 (8, TType.LIST, 'binary_annotations', (TType.STRUCT,(BinaryAnnotation, BinaryAnnotation.thrift_spec)), None, ), # 8 ) def __init__(self, trace_id=None, name=None, id=None, parent_id=None, annotations=None, binary_annotations=None,): self.trace_id = trace_id self.name = name self.id = id self.parent_id = parent_id self.annotations = annotations self.binary_annotations = binary_annotations def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I64: self.trace_id = iprot.readI64(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.name = iprot.readString(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I64: self.id = iprot.readI64(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.I64: self.parent_id = iprot.readI64(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: 
self.annotations = [] (_etype3, _size0) = iprot.readListBegin() for _i4 in xrange(_size0): _elem5 = Annotation() _elem5.read(iprot) self.annotations.append(_elem5) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: if ftype == TType.LIST: self.binary_annotations = [] (_etype9, _size6) = iprot.readListBegin() for _i10 in xrange(_size6): _elem11 = BinaryAnnotation() _elem11.read(iprot) self.binary_annotations.append(_elem11) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Span') if self.trace_id is not None: oprot.writeFieldBegin('trace_id', TType.I64, 1) oprot.writeI64(self.trace_id) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 3) oprot.writeString(self.name) oprot.writeFieldEnd() if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 4) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.parent_id is not None: oprot.writeFieldBegin('parent_id', TType.I64, 5) oprot.writeI64(self.parent_id) oprot.writeFieldEnd() if self.annotations is not None: oprot.writeFieldBegin('annotations', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.annotations)) for iter12 in self.annotations: iter12.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.binary_annotations is not None: oprot.writeFieldBegin('binary_annotations', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.binary_annotations)) for iter13 in self.binary_annotations: iter13.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) eventlet-0.30.2/eventlet/zipkin/api.py0000644000076500000240000001250414006212666020336 0ustar temotostaff00000000000000import os import sys import time import struct import socket import random from eventlet.green import threading from eventlet.zipkin._thrift.zipkinCore import ttypes from eventlet.zipkin._thrift.zipkinCore.constants import SERVER_SEND client = None _tls = threading.local() # thread local storage def put_annotation(msg, endpoint=None): """ This is annotation API. You can add your own annotation from in your code. Annotation is recorded with timestamp automatically. e.g.) put_annotation('cache hit for %s' % request) :param msg: String message :param endpoint: host info """ if is_sample(): a = ZipkinDataBuilder.build_annotation(msg, endpoint) trace_data = get_trace_data() trace_data.add_annotation(a) def put_key_value(key, value, endpoint=None): """ This is binary annotation API. You can add your own key-value extra information from in your code. Key-value doesn't have a time component. e.g.) 
put_key_value('http.uri', '/hoge/index.html') :param key: String :param value: String :param endpoint: host info """ if is_sample(): b = ZipkinDataBuilder.build_binary_annotation(key, value, endpoint) trace_data = get_trace_data() trace_data.add_binary_annotation(b) def is_tracing(): """ Return whether the current thread is tracking or not """ return hasattr(_tls, 'trace_data') def is_sample(): """ Return whether it should record trace information for the request or not """ return is_tracing() and _tls.trace_data.sampled def get_trace_data(): if is_tracing(): return _tls.trace_data def set_trace_data(trace_data): _tls.trace_data = trace_data def init_trace_data(): if is_tracing(): del _tls.trace_data def _uniq_id(): """ Create a random 64-bit signed integer appropriate for use as trace and span IDs. XXX: By experimentation zipkin has trouble recording traces with ids larger than (2 ** 56) - 1 """ return random.randint(0, (2 ** 56) - 1) def generate_trace_id(): return _uniq_id() def generate_span_id(): return _uniq_id() class TraceData(object): END_ANNOTATION = SERVER_SEND def __init__(self, name, trace_id, span_id, parent_id, sampled, endpoint): """ :param name: RPC name (String) :param trace_id: int :param span_id: int :param parent_id: int or None :param sampled: lets the downstream servers know if I should record trace data for the request (bool) :param endpoint: zipkin._thrift.zipkinCore.ttypes.EndPoint """ self.name = name self.trace_id = trace_id self.span_id = span_id self.parent_id = parent_id self.sampled = sampled self.endpoint = endpoint self.annotations = [] self.bannotations = [] self._done = False def add_annotation(self, annotation): if annotation.host is None: annotation.host = self.endpoint if not self._done: self.annotations.append(annotation) if annotation.value == self.END_ANNOTATION: self.flush() def add_binary_annotation(self, bannotation): if bannotation.host is None: bannotation.host = self.endpoint if not self._done: self.bannotations.append(bannotation) def flush(self): span = ZipkinDataBuilder.build_span(name=self.name, trace_id=self.trace_id, span_id=self.span_id, parent_id=self.parent_id, annotations=self.annotations, bannotations=self.bannotations) client.send_to_collector(span) self.annotations = [] self.bannotations = [] self._done = True class ZipkinDataBuilder: @staticmethod def build_span(name, trace_id, span_id, parent_id, annotations, bannotations): return ttypes.Span( name=name, trace_id=trace_id, id=span_id, parent_id=parent_id, annotations=annotations, binary_annotations=bannotations ) @staticmethod def build_annotation(value, endpoint=None): if isinstance(value, unicode): value = value.encode('utf-8') return ttypes.Annotation(time.time() * 1000 * 1000, str(value), endpoint) @staticmethod def build_binary_annotation(key, value, endpoint=None): annotation_type = ttypes.AnnotationType.STRING return ttypes.BinaryAnnotation(key, value, annotation_type, endpoint) @staticmethod def build_endpoint(ipv4=None, port=None, service_name=None): if ipv4 is not None: ipv4 = ZipkinDataBuilder._ipv4_to_int(ipv4) if service_name is None: service_name = ZipkinDataBuilder._get_script_name() return ttypes.Endpoint( ipv4=ipv4, port=port, service_name=service_name ) @staticmethod def _ipv4_to_int(ipv4): return struct.unpack('!i', socket.inet_aton(ipv4))[0] @staticmethod def _get_script_name(): return os.path.basename(sys.argv[0]) eventlet-0.30.2/eventlet/zipkin/client.py0000644000076500000240000000324414006212666021044 0ustar temotostaff00000000000000import base64 import 
warnings from scribe import scribe from thrift.transport import TTransport, TSocket from thrift.protocol import TBinaryProtocol from eventlet import GreenPile CATEGORY = 'zipkin' class ZipkinClient(object): def __init__(self, host='127.0.0.1', port=9410): """ :param host: zipkin collector IP addoress (default '127.0.0.1') :param port: zipkin collector port (default 9410) """ self.host = host self.port = port self.pile = GreenPile(1) self._connect() def _connect(self): socket = TSocket.TSocket(self.host, self.port) self.transport = TTransport.TFramedTransport(socket) protocol = TBinaryProtocol.TBinaryProtocol(self.transport, False, False) self.scribe_client = scribe.Client(protocol) try: self.transport.open() except TTransport.TTransportException as e: warnings.warn(e.message) def _build_message(self, thrift_obj): trans = TTransport.TMemoryBuffer() protocol = TBinaryProtocol.TBinaryProtocolAccelerated(trans=trans) thrift_obj.write(protocol) return base64.b64encode(trans.getvalue()) def send_to_collector(self, span): self.pile.spawn(self._send, span) def _send(self, span): log_entry = scribe.LogEntry(CATEGORY, self._build_message(span)) try: self.scribe_client.Log([log_entry]) except Exception as e: msg = 'ZipkinClient send error %s' % str(e) warnings.warn(msg) self._connect() def close(self): self.transport.close() eventlet-0.30.2/eventlet/zipkin/greenthread.py0000644000076500000240000000151314006212666022053 0ustar temotostaff00000000000000from eventlet import greenthread from eventlet.zipkin import api __original_init__ = greenthread.GreenThread.__init__ __original_main__ = greenthread.GreenThread.main def _patched__init(self, parent): # parent thread saves current TraceData from tls to self if api.is_tracing(): self.trace_data = api.get_trace_data() __original_init__(self, parent) def _patched_main(self, function, args, kwargs): # child thread inherits TraceData if hasattr(self, 'trace_data'): api.set_trace_data(self.trace_data) __original_main__(self, function, args, kwargs) def patch(): greenthread.GreenThread.__init__ = _patched__init greenthread.GreenThread.main = _patched_main def unpatch(): greenthread.GreenThread.__init__ = __original_init__ greenthread.GreenThread.main = __original_main__ eventlet-0.30.2/eventlet/zipkin/http.py0000644000076500000240000000334714006212666020551 0ustar temotostaff00000000000000import warnings import six from eventlet.green import httplib from eventlet.zipkin import api # see https://twitter.github.io/zipkin/Instrumenting.html HDR_TRACE_ID = 'X-B3-TraceId' HDR_SPAN_ID = 'X-B3-SpanId' HDR_PARENT_SPAN_ID = 'X-B3-ParentSpanId' HDR_SAMPLED = 'X-B3-Sampled' if six.PY2: __org_endheaders__ = httplib.HTTPConnection.endheaders __org_begin__ = httplib.HTTPResponse.begin def _patched_endheaders(self): if api.is_tracing(): trace_data = api.get_trace_data() new_span_id = api.generate_span_id() self.putheader(HDR_TRACE_ID, hex_str(trace_data.trace_id)) self.putheader(HDR_SPAN_ID, hex_str(new_span_id)) self.putheader(HDR_PARENT_SPAN_ID, hex_str(trace_data.span_id)) self.putheader(HDR_SAMPLED, int(trace_data.sampled)) api.put_annotation('Client Send') __org_endheaders__(self) def _patched_begin(self): __org_begin__(self) if api.is_tracing(): api.put_annotation('Client Recv (%s)' % self.status) def patch(): if six.PY2: httplib.HTTPConnection.endheaders = _patched_endheaders httplib.HTTPResponse.begin = _patched_begin if six.PY3: warnings.warn("Since current Python thrift release \ doesn't support Python 3, eventlet.zipkin.http \ doesn't also support Python 3 
(http.client)") def unpatch(): if six.PY2: httplib.HTTPConnection.endheaders = __org_endheaders__ httplib.HTTPResponse.begin = __org_begin__ if six.PY3: pass def hex_str(n): """ Thrift uses a binary representation of trace and span ids HTTP headers use a hexadecimal representation of the same """ return '%0.16x' % (n,) eventlet-0.30.2/eventlet/zipkin/log.py0000644000076500000240000000052114006212666020342 0ustar temotostaff00000000000000import logging from eventlet.zipkin import api __original_handle__ = logging.Logger.handle def _patched_handle(self, record): __original_handle__(self, record) api.put_annotation(record.getMessage()) def patch(): logging.Logger.handle = _patched_handle def unpatch(): logging.Logger.handle = __original_handle__ eventlet-0.30.2/eventlet/zipkin/patcher.py0000644000076500000240000000255414006212666021217 0ustar temotostaff00000000000000from eventlet.zipkin import http from eventlet.zipkin import wsgi from eventlet.zipkin import greenthread from eventlet.zipkin import log from eventlet.zipkin import api from eventlet.zipkin.client import ZipkinClient def enable_trace_patch(host='127.0.0.1', port=9410, trace_app_log=False, sampling_rate=1.0): """ Apply monkey patch to trace your WSGI application. :param host: Scribe daemon IP address (default: '127.0.0.1') :param port: Scribe daemon port (default: 9410) :param trace_app_log: A Boolean indicating if the tracer will trace application log together or not. This facility assume that your application uses python standard logging library. (default: False) :param sampling_rate: A Float value (0.0~1.0) that indicates the tracing frequency. If you specify 1.0, all request are traced (and sent to Zipkin collecotr). If you specify 0.1, only 1/10 requests are traced. (default: 1.0) """ api.client = ZipkinClient(host, port) # monkey patch for adding tracing facility wsgi.patch(sampling_rate) http.patch() greenthread.patch() # monkey patch for capturing application log if trace_app_log: log.patch() def disable_trace_patch(): http.unpatch() wsgi.unpatch() greenthread.unpatch() log.unpatch() api.client.close() eventlet-0.30.2/eventlet/zipkin/wsgi.py0000644000076500000240000000434414006212666020541 0ustar temotostaff00000000000000import random from eventlet import wsgi from eventlet.zipkin import api from eventlet.zipkin._thrift.zipkinCore.constants import \ SERVER_RECV, SERVER_SEND from eventlet.zipkin.http import \ HDR_TRACE_ID, HDR_SPAN_ID, HDR_PARENT_SPAN_ID, HDR_SAMPLED _sampler = None __original_handle_one_response__ = wsgi.HttpProtocol.handle_one_response def _patched_handle_one_response(self): api.init_trace_data() trace_id = int_or_none(self.headers.getheader(HDR_TRACE_ID)) span_id = int_or_none(self.headers.getheader(HDR_SPAN_ID)) parent_id = int_or_none(self.headers.getheader(HDR_PARENT_SPAN_ID)) sampled = bool_or_none(self.headers.getheader(HDR_SAMPLED)) if trace_id is None: # front-end server trace_id = span_id = api.generate_trace_id() parent_id = None sampled = _sampler.sampling() ip, port = self.request.getsockname()[:2] ep = api.ZipkinDataBuilder.build_endpoint(ip, port) trace_data = api.TraceData(name=self.command, trace_id=trace_id, span_id=span_id, parent_id=parent_id, sampled=sampled, endpoint=ep) api.set_trace_data(trace_data) api.put_annotation(SERVER_RECV) api.put_key_value('http.uri', self.path) __original_handle_one_response__(self) if api.is_sample(): api.put_annotation(SERVER_SEND) class Sampler(object): def __init__(self, sampling_rate): self.sampling_rate = sampling_rate def sampling(self): # avoid 
generating unneeded random numbers if self.sampling_rate == 1.0: return True r = random.random() if r < self.sampling_rate: return True return False def int_or_none(val): if val is None: return None return int(val, 16) def bool_or_none(val): if val == '1': return True if val == '0': return False return None def patch(sampling_rate): global _sampler _sampler = Sampler(sampling_rate) wsgi.HttpProtocol.handle_one_response = _patched_handle_one_response def unpatch(): wsgi.HttpProtocol.handle_one_response = __original_handle_one_response__ eventlet-0.30.2/eventlet.egg-info/0000755000076500000240000000000014017673044017402 5ustar temotostaff00000000000000eventlet-0.30.2/eventlet.egg-info/PKG-INFO0000644000076500000240000001100714017673043020475 0ustar temotostaff00000000000000Metadata-Version: 1.1 Name: eventlet Version: 0.30.2 Summary: Highly concurrent networking library Home-page: http://eventlet.net Author: Linden Lab Author-email: eventletdev@lists.secondlife.com License: UNKNOWN Description: Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it. It uses epoll or libevent for highly scalable non-blocking I/O. Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O. The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application. It's easy to get started using Eventlet, and easy to convert existing applications to use it. Start off by looking at the `examples`_, `common design patterns`_, and the list of `basic API primitives`_. .. _examples: http://eventlet.net/doc/examples.html .. _common design patterns: http://eventlet.net/doc/design_patterns.html .. _basic API primitives: http://eventlet.net/doc/basic_usage.html Quick Example =============== Here's something you can try right on the command line:: % python3 >>> import eventlet >>> from eventlet.green.urllib.request import urlopen >>> gt = eventlet.spawn(urlopen, 'http://eventlet.net') >>> gt2 = eventlet.spawn(urlopen, 'http://secondlife.com') >>> gt2.wait() >>> gt.wait() Getting Eventlet ================== The easiest way to get Eventlet is to use pip:: pip install -U eventlet To install latest development version once:: pip install -U https://github.com/eventlet/eventlet/archive/master.zip Building the Docs Locally ========================= To build a complete set of HTML documentation, you must have Sphinx, which can be found at http://sphinx.pocoo.org/ (or installed with `pip install Sphinx`):: cd doc make html The built html files can be found in doc/_build/html afterward. Twisted ======= Eventlet had Twisted hub in the past, but community interest to this integration has dropped over time, now it is not supported, so with apologies for any inconvenience we discontinue Twisted integration. If you have a project that uses Eventlet with Twisted, your options are: * use last working release eventlet==0.14 * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13, `EVENTLET_HUB` environment variable can point to external modules. * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into main project. Apologies for any inconvenience. 
Supported Python versions ========================= Currently CPython 2.7 and 3.4+ are supported, but **2.7 and 3.4 support is deprecated and will be removed in the future**, only CPython 3.5+ support will remain. Flair ===== .. image:: https://img.shields.io/pypi/v/eventlet :target: https://pypi.org/project/eventlet/ .. image:: https://travis-ci.org/eventlet/eventlet.svg?branch=master :target: https://travis-ci.org/eventlet/eventlet .. image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg :target: https://codecov.io/gh/eventlet/eventlet Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: MacOS :: MacOS X Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python Classifier: Topic :: Internet Classifier: Topic :: Software Development :: Libraries :: Python Modules eventlet-0.30.2/eventlet.egg-info/SOURCES.txt0000644000076500000240000001717114017673043021274 0ustar temotostaff00000000000000AUTHORS LICENSE MANIFEST.in NEWS README.rst setup.cfg setup.py benchmarks/__init__.py benchmarks/localhost_socket.py benchmarks/spawn.py doc/Makefile doc/authors.rst doc/basic_usage.rst doc/common.txt doc/conf.py doc/design_patterns.rst doc/environment.rst doc/examples.rst doc/history.rst doc/hubs.rst doc/index.rst doc/modules.rst doc/patching.rst doc/ssl.rst doc/testing.rst doc/threading.rst doc/zeromq.rst doc/images/threading_illustration.png doc/modules/backdoor.rst doc/modules/corolocal.rst doc/modules/dagpool.rst doc/modules/db_pool.rst doc/modules/debug.rst doc/modules/event.rst doc/modules/greenpool.rst doc/modules/greenthread.rst doc/modules/pools.rst doc/modules/queue.rst doc/modules/semaphore.rst doc/modules/timeout.rst doc/modules/websocket.rst doc/modules/wsgi.rst doc/modules/zmq.rst eventlet/__init__.py eventlet/backdoor.py eventlet/convenience.py eventlet/corolocal.py eventlet/coros.py eventlet/dagpool.py eventlet/db_pool.py eventlet/debug.py eventlet/event.py eventlet/greenpool.py eventlet/greenthread.py eventlet/patcher.py eventlet/pools.py eventlet/queue.py eventlet/semaphore.py eventlet/timeout.py eventlet/tpool.py eventlet/websocket.py eventlet/wsgi.py eventlet.egg-info/PKG-INFO eventlet.egg-info/SOURCES.txt eventlet.egg-info/dependency_links.txt eventlet.egg-info/not-zip-safe eventlet.egg-info/requires.txt eventlet.egg-info/top_level.txt eventlet/green/BaseHTTPServer.py eventlet/green/CGIHTTPServer.py eventlet/green/MySQLdb.py eventlet/green/Queue.py eventlet/green/SimpleHTTPServer.py eventlet/green/SocketServer.py eventlet/green/__init__.py eventlet/green/_socket_nodns.py eventlet/green/asynchat.py eventlet/green/asyncore.py eventlet/green/builtin.py eventlet/green/ftplib.py eventlet/green/httplib.py eventlet/green/os.py eventlet/green/profile.py eventlet/green/select.py eventlet/green/selectors.py eventlet/green/socket.py eventlet/green/ssl.py eventlet/green/subprocess.py eventlet/green/thread.py eventlet/green/threading.py eventlet/green/time.py eventlet/green/urllib2.py eventlet/green/zmq.py eventlet/green/OpenSSL/SSL.py eventlet/green/OpenSSL/__init__.py 
eventlet/green/OpenSSL/crypto.py eventlet/green/OpenSSL/tsafe.py eventlet/green/OpenSSL/version.py eventlet/green/http/__init__.py eventlet/green/http/client.py eventlet/green/http/cookiejar.py eventlet/green/http/cookies.py eventlet/green/http/server.py eventlet/green/urllib/__init__.py eventlet/green/urllib/error.py eventlet/green/urllib/parse.py eventlet/green/urllib/request.py eventlet/green/urllib/response.py eventlet/greenio/__init__.py eventlet/greenio/base.py eventlet/greenio/py2.py eventlet/greenio/py3.py eventlet/hubs/__init__.py eventlet/hubs/epolls.py eventlet/hubs/hub.py eventlet/hubs/kqueue.py eventlet/hubs/poll.py eventlet/hubs/pyevent.py eventlet/hubs/selects.py eventlet/hubs/timer.py eventlet/support/__init__.py eventlet/support/greendns.py eventlet/support/greenlets.py eventlet/support/psycopg2_patcher.py eventlet/support/pylib.py eventlet/support/stacklesspypys.py eventlet/support/stacklesss.py eventlet/zipkin/__init__.py eventlet/zipkin/api.py eventlet/zipkin/client.py eventlet/zipkin/greenthread.py eventlet/zipkin/http.py eventlet/zipkin/log.py eventlet/zipkin/patcher.py eventlet/zipkin/wsgi.py eventlet/zipkin/_thrift/__init__.py eventlet/zipkin/_thrift/zipkinCore/__init__.py eventlet/zipkin/_thrift/zipkinCore/constants.py eventlet/zipkin/_thrift/zipkinCore/ttypes.py examples/chat_bridge.py examples/chat_server.py examples/connect.py examples/distributed_websocket_chat.py examples/echoserver.py examples/feedscraper-testclient.py examples/feedscraper.py examples/forwarder.py examples/producer_consumer.py examples/recursive_crawler.py examples/webcrawler.py examples/websocket.html examples/websocket.py examples/websocket_chat.html examples/websocket_chat.py examples/wsgi.py examples/zmq_chat.py examples/zmq_simple.py tests/__init__.py tests/api_test.py tests/backdoor_test.py tests/convenience_test.py tests/dagpool_test.py tests/db_pool_test.py tests/debug_test.py tests/env_test.py tests/event_test.py tests/green_http_test.py tests/green_profile_test.py tests/green_select_test.py tests/greendns_test.py tests/greenio_test.py tests/greenpool_test.py tests/greenthread_test.py tests/hub_test.py tests/mock.py tests/mysqldb_test.py tests/nosewrapper.py tests/openssl_test.py tests/os_test.py tests/parse_results.py tests/patcher_psycopg_test.py tests/patcher_test.py tests/pools_test.py tests/queue_test.py tests/semaphore_test.py tests/socket_test.py tests/ssl_test.py tests/subprocess_test.py tests/test__event.py tests/test__greenness.py tests/test__refcount.py tests/test__socket_errors.py tests/test_server.crt tests/test_server.key tests/thread_test.py tests/timeout_test.py tests/timeout_test_with_statement.py tests/timer_test.py tests/tpool_test.py tests/websocket_new_test.py tests/websocket_test.py tests/wsgi_test.py tests/zmq_test.py tests/isolated/__init__.py tests/isolated/env_tpool_negative.py tests/isolated/env_tpool_size.py tests/isolated/env_tpool_zero.py tests/isolated/green_http_doesnt_change_original_module.py tests/isolated/green_httplib_doesnt_change_original_module.py tests/isolated/green_ssl_py36_properties.py tests/isolated/greendns_from_address_203.py tests/isolated/greendns_import_rdtypes_then_eventlet.py tests/isolated/greenio_double_close_219.py tests/isolated/hub_fork.py tests/isolated/hub_fork_simple.py tests/isolated/hub_kqueue_unsupported.py tests/isolated/hub_use_hub_class.py tests/isolated/mysqldb_monkey_patch.py tests/isolated/patcher_blocking_select_methods_are_deleted.py tests/isolated/patcher_builtin.py tests/isolated/patcher_existing_locks_early.py 
tests/isolated/patcher_existing_locks_late.py tests/isolated/patcher_existing_locks_locked.py tests/isolated/patcher_existing_locks_unlocked.py tests/isolated/patcher_fork_after_monkey_patch.py tests/isolated/patcher_import_patched_defaults.py tests/isolated/patcher_importlib_lock.py tests/isolated/patcher_open_kwargs.py tests/isolated/patcher_socketserver_selectors.py tests/isolated/patcher_threading_condition.py tests/isolated/patcher_threading_current.py tests/isolated/patcher_threading_join.py tests/isolated/patcher_threadpoolexecutor.py tests/isolated/regular_file_readall.py tests/isolated/socket_resolve_green.py tests/isolated/subprocess_exception_identity.py tests/isolated/subprocess_patched_communicate.py tests/isolated/tpool_exception_leak.py tests/isolated/tpool_isolate_socket_default_timeout.py tests/isolated/wsgi_connection_timeout.py tests/isolated/test_sub_module_in_import_patched/__init__.py tests/isolated/test_sub_module_in_import_patched/test.py tests/isolated/test_sub_module_in_import_patched/sample_main_module/__init__.py tests/isolated/test_sub_module_in_import_patched/sample_main_module/sample_sub_module/__init__.py tests/manual/__init__.py tests/manual/greenio_memtest.py tests/manual/regress-226-unpatched-ssl.py tests/manual/websocket-gunicorn.py tests/patcher/__init__.py tests/patcher/shared1.py tests/patcher/shared_import_socket.py tests/stdlib/all.py tests/stdlib/all_modules.py tests/stdlib/all_monkey.py tests/stdlib/test_SimpleHTTPServer.py tests/stdlib/test_asynchat.py tests/stdlib/test_asyncore.py tests/stdlib/test_ftplib.py tests/stdlib/test_httplib.py tests/stdlib/test_httpservers.py tests/stdlib/test_os.py tests/stdlib/test_queue.py tests/stdlib/test_select.py tests/stdlib/test_socket.py tests/stdlib/test_socket_ssl.py tests/stdlib/test_socketserver.py tests/stdlib/test_ssl.py tests/stdlib/test_subprocess.py tests/stdlib/test_thread.py tests/stdlib/test_thread__boundedsem.py tests/stdlib/test_threading.py tests/stdlib/test_threading_local.py tests/stdlib/test_timeout.py tests/stdlib/test_urllib.py tests/stdlib/test_urllib2.py tests/stdlib/test_urllib2_localnet.pyeventlet-0.30.2/eventlet.egg-info/dependency_links.txt0000644000076500000240000000000114017673043023447 0ustar temotostaff00000000000000 eventlet-0.30.2/eventlet.egg-info/not-zip-safe0000644000076500000240000000000113031326661021623 0ustar temotostaff00000000000000 eventlet-0.30.2/eventlet.egg-info/requires.txt0000644000076500000240000000013514017673043022000 0ustar temotostaff00000000000000dnspython<2.0.0,>=1.15.0 greenlet>=0.3 six>=1.10.0 [:python_version < "3.5"] monotonic>=1.4 eventlet-0.30.2/eventlet.egg-info/top_level.txt0000644000076500000240000000001114017673043022123 0ustar temotostaff00000000000000eventlet eventlet-0.30.2/examples/0000755000076500000240000000000014017673044015700 5ustar temotostaff00000000000000eventlet-0.30.2/examples/chat_bridge.py0000644000076500000240000000102714006212666020502 0ustar temotostaff00000000000000import sys from zmq import FORWARDER, PUB, SUB, SUBSCRIBE from zmq.devices import Device if __name__ == "__main__": usage = 'usage: chat_bridge sub_address pub_address' if len(sys.argv) != 3: print(usage) sys.exit(1) sub_addr = sys.argv[1] pub_addr = sys.argv[2] print("Recieving on %s" % sub_addr) print("Sending on %s" % pub_addr) device = Device(FORWARDER, SUB, PUB) device.bind_in(sub_addr) device.setsockopt_in(SUBSCRIBE, "") device.bind_out(pub_addr) device.start() eventlet-0.30.2/examples/chat_server.py0000644000076500000240000000225314006212666020556 0ustar 
temotostaff00000000000000import eventlet from eventlet.green import socket PORT = 3001 participants = set() def read_chat_forever(writer, reader): line = reader.readline() while line: print("Chat:", line.strip()) for p in participants: try: if p is not writer: # Don't echo p.write(line) p.flush() except socket.error as e: # ignore broken pipes, they just mean the participant # closed its connection already if e[0] != 32: raise line = reader.readline() participants.remove(writer) print("Participant left chat.") try: print("ChatServer starting up on port %s" % PORT) server = eventlet.listen(('0.0.0.0', PORT)) while True: new_connection, address = server.accept() print("Participant joined chat.") new_writer = new_connection.makefile('w') participants.add(new_writer) eventlet.spawn_n(read_chat_forever, new_writer, new_connection.makefile('r')) except (KeyboardInterrupt, SystemExit): print("ChatServer exiting.") eventlet-0.30.2/examples/connect.py0000644000076500000240000000136714006212666017707 0ustar temotostaff00000000000000"""Spawn multiple workers and collect their results. Demonstrates how to use the eventlet.green.socket module. """ from __future__ import print_function import eventlet from eventlet.green import socket def geturl(url): c = socket.socket() ip = socket.gethostbyname(url) c.connect((ip, 80)) print('%s connected' % url) c.sendall('GET /\r\n\r\n') return c.recv(1024) urls = ['www.google.com', 'www.yandex.ru', 'www.python.org'] pile = eventlet.GreenPile() for x in urls: pile.spawn(geturl, x) # note that the pile acts as a collection of return values from the functions # if any exceptions are raised by the function they'll get raised here for url, result in zip(urls, pile): print('%s: %s' % (url, repr(result)[:50])) eventlet-0.30.2/examples/distributed_websocket_chat.py0000644000076500000240000000744214006212666023645 0ustar temotostaff00000000000000"""This is a websocket chat example with many servers. A client can connect to any of the servers and their messages will be received by all clients connected to any of the servers. 
Run the examples like this: $ python examples/chat_bridge.py tcp://127.0.0.1:12345 tcp://127.0.0.1:12346 and the servers like this (changing the port for each one obviously): $ python examples/distributed_websocket_chat.py -p tcp://127.0.0.1:12345 -s tcp://127.0.0.1:12346 7000 So all messages are published to port 12345 and the device forwards all the messages to 12346 where they are subscribed to """ import os import sys import eventlet from collections import defaultdict from eventlet import spawn_n, sleep from eventlet import wsgi from eventlet import websocket from eventlet.green import zmq from eventlet.hubs import get_hub, use_hub from uuid import uuid1 use_hub('zeromq') ctx = zmq.Context() class IDName(object): def __init__(self): self.id = uuid1() self.name = None def __str__(self): if self.name: return self.name else: return str(self.id) def pack_message(self, msg): return self, msg def unpack_message(self, msg): sender, message = msg sender_name = 'you said' if sender.id == self.id \ else '%s says' % sender return "%s: %s" % (sender_name, message) participants = defaultdict(IDName) def subscribe_and_distribute(sub_socket): global participants while True: msg = sub_socket.recv_pyobj() for ws, name_id in participants.items(): to_send = name_id.unpack_message(msg) if to_send: try: ws.send(to_send) except: del participants[ws] @websocket.WebSocketWSGI def handle(ws): global pub_socket name_id = participants[ws] ws.send("Connected as %s, change name with 'name: new_name'" % name_id) try: while True: m = ws.wait() if m is None: break if m.startswith('name:'): old_name = str(name_id) new_name = m.split(':', 1)[1].strip() name_id.name = new_name m = 'Changed name from %s' % old_name pub_socket.send_pyobj(name_id.pack_message(m)) sleep() finally: del participants[ws] def dispatch(environ, start_response): """Resolves to the web page or the websocket depending on the path.""" global port if environ['PATH_INFO'] == '/chat': return handle(environ, start_response) else: start_response('200 OK', [('content-type', 'text/html')]) return [open(os.path.join( os.path.dirname(__file__), 'websocket_chat.html')).read() % dict(port=port)] port = None if __name__ == "__main__": usage = 'usage: websocket_chat -p pub address -s sub address port number' if len(sys.argv) != 6: print(usage) sys.exit(1) pub_addr = sys.argv[2] sub_addr = sys.argv[4] try: port = int(sys.argv[5]) except ValueError: print("Error port supplied couldn't be converted to int\n", usage) sys.exit(1) try: pub_socket = ctx.socket(zmq.PUB) pub_socket.connect(pub_addr) print("Publishing to %s" % pub_addr) sub_socket = ctx.socket(zmq.SUB) sub_socket.connect(sub_addr) sub_socket.setsockopt(zmq.SUBSCRIBE, "") print("Subscribing to %s" % sub_addr) except: print("Couldn't create sockets\n", usage) sys.exit(1) spawn_n(subscribe_and_distribute, sub_socket) listener = eventlet.listen(('127.0.0.1', port)) print("\nVisit http://localhost:%s/ in your websocket-capable browser.\n" % port) wsgi.server(listener, dispatch) eventlet-0.30.2/examples/echoserver.py0000644000076500000240000000170714006212666020421 0ustar temotostaff00000000000000#! /usr/bin/env python """\ Simple server that listens on port 6000 and echos back every input to the client. To try out the server, start it up by running this file. 
Connect to it with: telnet localhost 6000 You terminate your connection by terminating telnet (typically Ctrl-] and then 'quit') """ from __future__ import print_function import eventlet def handle(fd): print("client connected") while True: # pass through every non-eof line x = fd.readline() if not x: break fd.write(x) fd.flush() print("echoed", x, end=' ') print("client disconnected") print("server socket listening on port 6000") server = eventlet.listen(('0.0.0.0', 6000)) pool = eventlet.GreenPool() while True: try: new_sock, address = server.accept() print("accepted", address) pool.spawn_n(handle, new_sock.makefile('rw')) except (SystemExit, KeyboardInterrupt): break eventlet-0.30.2/examples/feedscraper-testclient.py0000644000076500000240000000146314006212666022712 0ustar temotostaff00000000000000from eventlet.green.urllib.request import urlopen big_list_of_feeds = """ http://blog.eventlet.net/feed/ http://rss.slashdot.org/Slashdot/slashdot http://feeds.boingboing.net/boingboing/iBag http://feeds.feedburner.com/RockPaperShotgun http://feeds.penny-arcade.com/pa-mainsite http://achewood.com/rss.php http://raysmuckles.blogspot.com/atom.xml http://rbeef.blogspot.com/atom.xml http://journeyintoreason.blogspot.com/atom.xml http://orezscu.blogspot.com/atom.xml http://feeds2.feedburner.com/AskMetafilter http://feeds2.feedburner.com/Metafilter http://stackoverflow.com/feeds http://feeds.feedburner.com/codinghorror http://www.tbray.org/ongoing/ongoing.atom http://www.zeldman.com/feed/ http://ln.hixie.ch/rss/html """ url = 'http://localhost:9010/' result = urlopen(url, big_list_of_feeds) print(result.read()) eventlet-0.30.2/examples/feedscraper.py0000644000076500000240000000214514006212666020534 0ustar temotostaff00000000000000"""A simple web server that accepts POSTS containing a list of feed urls, and returns the titles of those feeds. """ import eventlet feedparser = eventlet.import_patched('feedparser') # the pool provides a safety limit on our concurrency pool = eventlet.GreenPool() def fetch_title(url): d = feedparser.parse(url) return d.feed.get('title', '') def app(environ, start_response): if environ['REQUEST_METHOD'] != 'POST': start_response('403 Forbidden', []) return [] # the pile collects the result of a concurrent operation -- in this case, # the collection of feed titles pile = eventlet.GreenPile(pool) for line in environ['wsgi.input'].readlines(): url = line.strip() if url: pile.spawn(fetch_title, url) # since the pile is an iterator over the results, # you can use it in all sorts of great Pythonic ways titles = '\n'.join(pile) start_response('200 OK', [('Content-type', 'text/plain')]) return [titles] if __name__ == '__main__': from eventlet import wsgi wsgi.server(eventlet.listen(('localhost', 9010)), app) eventlet-0.30.2/examples/forwarder.py0000644000076500000240000000155614006212666020251 0ustar temotostaff00000000000000""" This is an incredibly simple port forwarder from port 7000 to 22 on localhost. It calls a callback function when the socket is closed, to demonstrate one way that you could start to do interesting things by starting from a simple framework like this. 
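For instance (a sketch of one such extension, not part of the original example), the callback could keep a running count of closed connections:

    closed_count = [0]

    def closed_callback():
        closed_count[0] += 1
        print("forwarded connections closed:", closed_count[0])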
""" import eventlet def closed_callback(): print("called back") def forward(source, dest, cb=lambda: None): """Forwards bytes unidirectionally from source to dest""" while True: d = source.recv(32384) if d == '': cb() break dest.sendall(d) listener = eventlet.listen(('localhost', 7000)) while True: client, addr = listener.accept() server = eventlet.connect(('localhost', 22)) # two unidirectional forwarders make a bidirectional one eventlet.spawn_n(forward, client, server, closed_callback) eventlet.spawn_n(forward, server, client) eventlet-0.30.2/examples/producer_consumer.py0000644000076500000240000000354314006212666022012 0ustar temotostaff00000000000000"""This is a recursive web crawler. Don't go pointing this at random sites; it doesn't respect robots.txt and it is pretty brutal about how quickly it fetches pages. This is a kind of "producer/consumer" example; the fetch function produces jobs, and the GreenPool itself is the consumer, farming out work concurrently. It's easier to write it this way rather than writing a standard consumer loop; GreenPool handles any exceptions raised and arranges so that there's a set number of "workers", so you don't have to write that tedious management code yourself. """ from eventlet.green.urllib.request import urlopen import eventlet import re # http://daringfireball.net/2009/11/liberal_regex_for_matching_urls url_regex = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))') def fetch(url, outq): """Fetch a url and push any urls found into a queue.""" print("fetching", url) data = '' with eventlet.Timeout(5, False): data = urllib2.urlopen(url).read().decode() for url_match in url_regex.finditer(data): new_url = url_match.group(0) outq.put(new_url) def producer(start_url): """Recursively crawl starting from *start_url*. Returns a set of urls that were found.""" pool = eventlet.GreenPool() seen = set() q = eventlet.Queue() q.put(start_url) # keep looping if there are new urls, or workers that may produce more urls while True: while not q.empty(): url = q.get() # limit requests to eventlet.net so we don't crash all over the internet if url not in seen and 'eventlet.net' in url: seen.add(url) pool.spawn_n(fetch, url, q) pool.waitall() if q.empty(): break return seen seen = producer("http://eventlet.net") print("I saw these urls:") print("\n".join(seen)) eventlet-0.30.2/examples/recursive_crawler.py0000644000076500000240000000335214006212666022000 0ustar temotostaff00000000000000"""This is a recursive web crawler. Don't go pointing this at random sites; it doesn't respect robots.txt and it is pretty brutal about how quickly it fetches pages. The code for this is very short; this is perhaps a good indication that this is making the most effective use of the primitves at hand. The fetch function does all the work of making http requests, searching for new urls, and dispatching new fetches. The GreenPool acts as sort of a job coordinator (and concurrency controller of course). 
""" from eventlet.green.urllib.request import urlopen import eventlet import re # http://daringfireball.net/2009/11/liberal_regex_for_matching_urls url_regex = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))') def fetch(url, seen, pool): """Fetch a url, stick any found urls into the seen set, and dispatch any new ones to the pool.""" print("fetching", url) data = '' with eventlet.Timeout(5, False): data = urlopen(url).read().decode() for url_match in url_regex.finditer(data): new_url = url_match.group(0) # only send requests to eventlet.net so as not to destroy the internet if new_url not in seen and 'eventlet.net' in new_url: seen.add(new_url) # while this seems stack-recursive, it's actually not: # spawned greenthreads start their own stacks pool.spawn_n(fetch, new_url, seen, pool) def crawl(start_url): """Recursively crawl starting from *start_url*. Returns a set of urls that were found.""" pool = eventlet.GreenPool() seen = set() fetch(start_url, seen, pool) pool.waitall() return seen seen = crawl("http://eventlet.net") print("I saw these urls:") print("\n".join(seen)) eventlet-0.30.2/examples/webcrawler.py0000644000076500000240000000150614006212666020406 0ustar temotostaff00000000000000#!/usr/bin/env python """ This is a simple web "crawler" that fetches a bunch of urls using a pool to control the number of outbound connections. It has as many simultaneously open connections as coroutines in the pool. The prints in the body of the fetch function are there to demonstrate that the requests are truly made in parallel. """ import eventlet from eventlet.green.urllib.request import urlopen urls = [ "https://www.google.com/intl/en_ALL/images/logo.gif", "http://python.org/images/python-logo.gif", "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif", ] def fetch(url): print("opening", url) body = urlopen(url).read() print("done with", url) return url, body pool = eventlet.GreenPool(200) for url, body in pool.imap(fetch, urls): print("got body from", url, "of length", len(body)) eventlet-0.30.2/examples/websocket.html0000644000076500000240000000300114006212666020543 0ustar temotostaff00000000000000

Plot

(Only tested in Chrome)

eventlet-0.30.2/examples/websocket.py0000644000076500000240000000233114006212666020234 0ustar temotostaff00000000000000import eventlet from eventlet import wsgi from eventlet import websocket import six # demo app import os import random @websocket.WebSocketWSGI def handle(ws): """ This is the websocket handler function. Note that we can dispatch based on path in here, too.""" if ws.path == '/echo': while True: m = ws.wait() if m is None: break ws.send(m) elif ws.path == '/data': for i in six.moves.range(10000): ws.send("0 %s %s\n" % (i, random.random())) eventlet.sleep(0.1) def dispatch(environ, start_response): """ This resolves to the web page or the websocket depending on the path.""" if environ['PATH_INFO'] == '/data': return handle(environ, start_response) else: start_response('200 OK', [('content-type', 'text/html')]) return [open(os.path.join( os.path.dirname(__file__), 'websocket.html')).read()] if __name__ == "__main__": # run an example app from the command line listener = eventlet.listen(('127.0.0.1', 7000)) print("\nVisit http://localhost:7000/ in your websocket-capable browser.\n") wsgi.server(listener, dispatch) eventlet-0.30.2/examples/websocket_chat.html0000644000076500000240000000146214006212666021553 0ustar temotostaff00000000000000

Chat!

(Only tested in Chrome)

eventlet-0.30.2/examples/websocket_chat.py0000644000076500000240000000204314006212666021233 0ustar temotostaff00000000000000import os import eventlet from eventlet import wsgi from eventlet import websocket PORT = 7000 participants = set() @websocket.WebSocketWSGI def handle(ws): participants.add(ws) try: while True: m = ws.wait() if m is None: break for p in participants: p.send(m) finally: participants.remove(ws) def dispatch(environ, start_response): """Resolves to the web page or the websocket depending on the path.""" if environ['PATH_INFO'] == '/chat': return handle(environ, start_response) else: start_response('200 OK', [('content-type', 'text/html')]) html_path = os.path.join(os.path.dirname(__file__), 'websocket_chat.html') return [open(html_path).read() % {'port': PORT}] if __name__ == "__main__": # run an example app from the command line listener = eventlet.listen(('127.0.0.1', PORT)) print("\nVisit http://localhost:7000/ in your websocket-capable browser.\n") wsgi.server(listener, dispatch) eventlet-0.30.2/examples/wsgi.py0000644000076500000240000000114114006212666017215 0ustar temotostaff00000000000000"""This is a simple example of running a wsgi application with eventlet. For a more fully-featured server which supports multiple processes, multiple threads, and graceful code reloading, see: http://pypi.python.org/pypi/Spawning/ """ import eventlet from eventlet import wsgi def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return ['Hello, World!\r\n'] wsgi.server(eventlet.listen(('', 8090)), hello_world) eventlet-0.30.2/examples/zmq_chat.py0000644000076500000240000000322214006212666020054 0ustar temotostaff00000000000000import eventlet import sys from eventlet.green import socket, zmq from eventlet.hubs import use_hub use_hub('zeromq') ADDR = 'ipc:///tmp/chat' ctx = zmq.Context() def publish(writer): print("connected") socket = ctx.socket(zmq.SUB) socket.setsockopt(zmq.SUBSCRIBE, "") socket.connect(ADDR) eventlet.sleep(0.1) while True: msg = socket.recv_pyobj() str_msg = "%s: %s" % msg writer.write(str_msg) writer.flush() PORT = 3001 def read_chat_forever(reader, pub_socket): line = reader.readline() who = 'someone' while line: print("Chat:", line.strip()) if line.startswith('name:'): who = line.split(':')[-1].strip() try: pub_socket.send_pyobj((who, line)) except socket.error as e: # ignore broken pipes, they just mean the participant # closed its connection already if e[0] != 32: raise line = reader.readline() print("Participant left chat.") try: print("ChatServer starting up on port %s" % PORT) server = eventlet.listen(('0.0.0.0', PORT)) pub_socket = ctx.socket(zmq.PUB) pub_socket.bind(ADDR) eventlet.spawn_n(publish, sys.stdout) while True: new_connection, address = server.accept() print("Participant joined chat.") eventlet.spawn_n(publish, new_connection.makefile('w')) eventlet.spawn_n(read_chat_forever, new_connection.makefile('r'), pub_socket) except (KeyboardInterrupt, SystemExit): print("ChatServer exiting.") eventlet-0.30.2/examples/zmq_simple.py0000644000076500000240000000131714006212666020431 0ustar temotostaff00000000000000from eventlet.green import zmq import eventlet CTX = zmq.Context(1) def bob_client(ctx, count): print("STARTING BOB") bob = zmq.Socket(CTX, zmq.REQ) bob.connect("ipc:///tmp/test") for i in range(0, count): print("BOB SENDING") bob.send("HI") print("BOB GOT:", bob.recv()) def 
alice_server(ctx, count): print("STARTING ALICE") alice = zmq.Socket(CTX, zmq.REP) alice.bind("ipc:///tmp/test") print("ALICE READY") for i in range(0, count): print("ALICE GOT:", alice.recv()) print("ALIC SENDING") alice.send("HI BACK") alice = eventlet.spawn(alice_server, CTX, 10) bob = eventlet.spawn(bob_client, CTX, 10) bob.wait() alice.wait() eventlet-0.30.2/setup.cfg0000644000076500000240000000015214017673044015701 0ustar temotostaff00000000000000[metadata] description-file = README.rst [wheel] universal = True [egg_info] tag_build = tag_date = 0 eventlet-0.30.2/setup.py0000644000076500000240000000300514006212666015567 0ustar temotostaff00000000000000#!/usr/bin/env python import os import setuptools os.environ.setdefault('EVENTLET_IMPORT_VERSION_ONLY', '1') import eventlet setuptools.setup( name='eventlet', version=eventlet.__version__, description='Highly concurrent networking library', author='Linden Lab', author_email='eventletdev@lists.secondlife.com', url='http://eventlet.net', packages=setuptools.find_packages(exclude=['benchmarks', 'tests', 'tests.*']), install_requires=( 'dnspython >= 1.15.0, < 2.0.0', 'greenlet >= 0.3', 'monotonic >= 1.4;python_version<"3.5"', 'six >= 1.10.0', ), zip_safe=False, long_description=open( os.path.join( os.path.dirname(__file__), 'README.rst' ) ).read(), test_suite='nose.collector', classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules", ] ) eventlet-0.30.2/tests/0000755000076500000240000000000014017673044015224 5ustar temotostaff00000000000000eventlet-0.30.2/tests/__init__.py0000644000076500000240000003676014006212666017346 0ustar temotostaff00000000000000# package is named tests, not test, so it won't be confused with test in stdlib from __future__ import print_function import contextlib import errno import functools import gc import json import os try: import resource except ImportError: resource = None import signal try: import subprocess32 as subprocess # py2 except ImportError: import subprocess # py3 import sys import unittest import warnings from nose.plugins.skip import SkipTest import eventlet from eventlet import tpool import six import socket from threading import Thread import struct # convenience for importers main = unittest.main @contextlib.contextmanager def assert_raises(exc_type): try: yield except exc_type: pass else: name = str(exc_type) try: name = exc_type.__name__ except AttributeError: pass assert False, 'Expected exception {0}'.format(name) def skipped(func, *decorator_args): """Decorator that marks a function as skipped. """ @functools.wraps(func) def wrapped(*a, **k): raise SkipTest(*decorator_args) return wrapped def skip_if(condition): """ Decorator that skips a test if the *condition* evaluates True. *condition* can be a boolean or a callable that accepts one argument. The callable will be called with the function to be decorated, and should return True to skip the test. 
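For example (an illustrative sketch only; the skip_on_windows helper below is
    built exactly this way):

        @skip_if(sys.platform.startswith('win'))
        def test_requires_unix(self):
            ...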
""" def skipped_wrapper(func): @functools.wraps(func) def wrapped(*a, **kw): if isinstance(condition, bool): result = condition else: result = condition(func) if result: raise SkipTest() else: return func(*a, **kw) return wrapped return skipped_wrapper def skip_unless(condition): """ Decorator that skips a test if the *condition* does not return True. *condition* can be a boolean or a callable that accepts one argument. The callable will be called with the function to be decorated, and should return True if the condition is satisfied. """ def skipped_wrapper(func): @functools.wraps(func) def wrapped(*a, **kw): if isinstance(condition, bool): result = condition else: result = condition(func) if not result: raise SkipTest() else: return func(*a, **kw) return wrapped return skipped_wrapper def using_pyevent(_f): from eventlet.hubs import get_hub return 'pyevent' in type(get_hub()).__module__ def skip_with_pyevent(func): """ Decorator that skips a test if we're using the pyevent hub.""" return skip_if(using_pyevent)(func) def skip_on_windows(func): """ Decorator that skips a test on Windows.""" return skip_if(sys.platform.startswith('win'))(func) def skip_if_no_itimer(func): """ Decorator that skips a test if the `itimer` module isn't found """ has_itimer = False try: import itimer has_itimer = True except ImportError: pass return skip_unless(has_itimer)(func) def skip_if_CRLock_exist(func): """ Decorator that skips a test if the `_thread.RLock` class exists """ try: from _thread import RLock return skipped(func) except ImportError: return func def skip_if_no_ssl(func): """ Decorator that skips a test if SSL is not available.""" try: import eventlet.green.ssl return func except ImportError: try: import eventlet.green.OpenSSL return func except ImportError: return skipped(func) def skip_if_no_ipv6(func): if os.environ.get('eventlet_test_ipv6') != '1': return skipped(func) return func class TestIsTakingTooLong(Exception): """ Custom exception class to be raised when a test's runtime exceeds a limit. """ pass class LimitedTestCase(unittest.TestCase): """ Unittest subclass that adds a timeout to all tests. Subclasses must be sure to call the LimitedTestCase setUp and tearDown methods. The default timeout is 1 second, change it by setting TEST_TIMEOUT to the desired quantity.""" TEST_TIMEOUT = 1 def setUp(self): self.previous_alarm = None self.timer = eventlet.Timeout(self.TEST_TIMEOUT, TestIsTakingTooLong(self.TEST_TIMEOUT)) def reset_timeout(self, new_timeout): """Changes the timeout duration; only has effect during one test. `new_timeout` can be int or float. """ self.timer.cancel() self.timer = eventlet.Timeout(new_timeout, TestIsTakingTooLong(new_timeout)) def set_alarm(self, new_timeout): """Call this in the beginning of your test if you expect busy loops. Only has effect during one test. `new_timeout` must be int. """ def sig_alarm_handler(sig, frame): # Could arm previous alarm but test is failed anyway # seems to be no point in restoring previous state. 
raise TestIsTakingTooLong(new_timeout) self.previous_alarm = ( signal.signal(signal.SIGALRM, sig_alarm_handler), signal.alarm(new_timeout), ) def tearDown(self): self.timer.cancel() if self.previous_alarm: signal.signal(signal.SIGALRM, self.previous_alarm[0]) signal.alarm(self.previous_alarm[1]) tpool.killall() gc.collect() eventlet.sleep(0) verify_hub_empty() def assert_less_than(self, a, b, msg=None): msg = msg or "%s not less than %s" % (a, b) assert a < b, msg assertLessThan = assert_less_than def assert_less_than_equal(self, a, b, msg=None): msg = msg or "%s not less than or equal to %s" % (a, b) assert a <= b, msg assertLessThanEqual = assert_less_than_equal def check_idle_cpu_usage(duration, allowed_part): if resource is None: # TODO: use https://code.google.com/p/psutil/ from nose.plugins.skip import SkipTest raise SkipTest('CPU usage testing not supported (`import resource` failed)') r1 = resource.getrusage(resource.RUSAGE_SELF) eventlet.sleep(duration) r2 = resource.getrusage(resource.RUSAGE_SELF) utime = r2.ru_utime - r1.ru_utime stime = r2.ru_stime - r1.ru_stime # This check is reliably unreliable on Travis, presumably because of CPU # resources being quite restricted by the build environment. The workaround # is to apply an arbitrary factor that should be enough to make it work nicely. if os.environ.get('TRAVIS') == 'true': allowed_part *= 5 assert utime + stime < duration * allowed_part, \ "CPU usage over limit: user %.0f%% sys %.0f%% allowed %.0f%%" % ( utime / duration * 100, stime / duration * 100, allowed_part * 100) def verify_hub_empty(): def format_listener(listener): return 'Listener %r for greenlet %r with run callback %r' % ( listener, listener.greenlet, getattr(listener.greenlet, 'run', None)) from eventlet import hubs hub = hubs.get_hub() readers = hub.get_readers() writers = hub.get_writers() num_readers = len(readers) num_writers = len(writers) num_timers = hub.get_timers_count() assert num_readers == 0 and num_writers == 0, \ "Readers: %s (%d) Writers: %s (%d)" % ( ', '.join(map(format_listener, readers)), num_readers, ', '.join(map(format_listener, writers)), num_writers, ) def find_command(command): for dir in os.getenv('PATH', '/usr/bin:/usr/sbin').split(os.pathsep): p = os.path.join(dir, command) if os.access(p, os.X_OK): return p raise IOError(errno.ENOENT, 'Command not found: %r' % command) def silence_warnings(func): def wrapper(*args, **kw): warnings.simplefilter('ignore', DeprecationWarning) try: return func(*args, **kw) finally: warnings.simplefilter('default', DeprecationWarning) wrapper.__name__ = func.__name__ return wrapper def get_database_auth(): """Retrieves a dict of connection parameters for connecting to test databases. Authentication parameters are highly-machine specific, so get_database_auth gets its information from either environment variables or a config file. The environment variable is "EVENTLET_DB_TEST_AUTH" and it should contain a json object. If this environment variable is present, it's used and config files are ignored. If it's not present, it looks in the local directory (tests) and in the user's home directory for a file named ".test_dbauth", which contains a json map of parameters to the connect function. 
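An illustrative value (mirroring the defaults below; not real credentials) would be:

        EVENTLET_DB_TEST_AUTH='{"MySQLdb": {"host": "localhost", "user": "root", "passwd": ""}, "psycopg2": {"user": "test"}}'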
""" retval = { 'MySQLdb': {'host': 'localhost', 'user': 'root', 'passwd': ''}, 'psycopg2': {'user': 'test'}, } if 'EVENTLET_DB_TEST_AUTH' in os.environ: return json.loads(os.environ.get('EVENTLET_DB_TEST_AUTH')) files = [os.path.join(os.path.dirname(__file__), '.test_dbauth'), os.path.join(os.path.expanduser('~'), '.test_dbauth')] for f in files: try: auth_utf8 = json.load(open(f)) # Have to convert unicode objects to str objects because # mysqldb is dumb. Using a doubly-nested list comprehension # because we know that the structure is a two-level dict. return dict( [(str(modname), dict( [(str(k), str(v)) for k, v in connectargs.items()])) for modname, connectargs in auth_utf8.items()]) except IOError: pass return retval def run_python(path, env=None, args=None, timeout=None, pythonpath_extend=None, expect_pass=False): new_argv = [sys.executable] if sys.version_info[:2] <= (2, 7): new_argv += ['-W', 'ignore:Python 2 is no longer supported'] new_env = os.environ.copy() new_env.setdefault('eventlet_test_in_progress', 'yes') src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if path: path = os.path.abspath(path) new_argv.append(path) new_env['PYTHONPATH'] = os.pathsep.join(sys.path + [src_dir]) if env: new_env.update(env) if pythonpath_extend: new_path = [p for p in new_env.get('PYTHONPATH', '').split(os.pathsep) if p] new_path.extend( p if os.path.isabs(p) else os.path.join(src_dir, p) for p in pythonpath_extend ) new_env['PYTHONPATH'] = os.pathsep.join(new_path) if args: new_argv.extend(args) p = subprocess.Popen( new_argv, env=new_env, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, stdout=subprocess.PIPE, ) if timeout is None: timeout = 10 try: output, _ = p.communicate(timeout=timeout) except subprocess.TimeoutExpired: p.kill() output, _ = p.communicate(timeout=timeout) if expect_pass: sys.stderr.write('Program {0} output:\n---\n{1}\n---\n'.format(path, output.decode())) assert False, 'timed out' return '{0}\nFAIL - timed out'.format(output).encode() if expect_pass: if output.startswith(b'skip'): parts = output.rstrip().split(b':', 1) skip_args = [] if len(parts) > 1: skip_args.append(parts[1]) raise SkipTest(*skip_args) ok = output.rstrip() == b'pass' if not ok: sys.stderr.write('Program {0} output:\n---\n{1}\n---\n'.format(path, output.decode())) assert ok, 'Expected single line "pass" in stdout' return output def run_isolated(path, prefix='tests/isolated/', **kwargs): kwargs.setdefault('expect_pass', True) run_python(prefix + path, **kwargs) def check_is_timeout(obj): value_text = getattr(obj, 'is_timeout', '(missing)') assert obj.is_timeout, 'type={0} str={1} .is_timeout={2}'.format(type(obj), str(obj), value_text) @contextlib.contextmanager def capture_stderr(): stream = six.StringIO() original = sys.stderr try: sys.stderr = stream yield stream finally: sys.stderr = original stream.seek(0) certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt') private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key') def test_run_python_timeout(): output = run_python('', args=('-c', 'import time; time.sleep(0.5)'), timeout=0.1) assert output.endswith(b'FAIL - timed out') def test_run_python_pythonpath_extend(): code = '''import os, sys ; print('\\n'.join(sys.path))''' output = run_python('', args=('-c', code), pythonpath_extend=('dira', 'dirb')) assert b'/dira\n' in output assert b'/dirb\n' in output @contextlib.contextmanager def dns_tcp_server(ip_to_give, request_count=1): state = [0] # request count storage writable by thread host = 
"localhost" death_pill = b"DEATH_PILL" def extract_domain(data): domain = b'' kind = (data[4] >> 3) & 15 # Opcode bits if kind == 0: # Standard query ini = 14 length = data[ini] while length != 0: domain += data[ini + 1:ini + length + 1] + b'.' ini += length + 1 length = data[ini] return domain def answer(data, domain): domain_length = len(domain) packet = b'' if domain: # If an ip was given we return it in the answer if ip_to_give: packet += data[2:4] + b'\x81\x80' packet += data[6:8] + data[6:8] + b'\x00\x00\x00\x00' # Questions and answers counts packet += data[14: 14 + domain_length + 1] # Original domain name question packet += b'\x00\x01\x00\x01' # Type and class packet += b'\xc0\x0c\x00\x01' # TTL packet += b'\x00\x01' packet += b'\x00\x00\x00\x08' packet += b'\x00\x04' # Resource data length -> 4 bytes packet += bytearray(int(x) for x in ip_to_give.split(".")) else: packet += data[2:4] + b'\x85\x80' packet += data[6:8] + b'\x00\x00' + b'\x00\x00\x00\x00' # Questions and answers counts packet += data[14: 14 + domain_length + 1] # Original domain name question packet += b'\x00\x01\x00\x01' # Type and class sz = struct.pack('>H', len(packet)) return sz + packet def serve(server_socket): # thread target client_sock, address = server_socket.accept() state[0] += 1 if state[0] <= request_count: data = bytearray(client_sock.recv(1024)) if data == death_pill: client_sock.close() return domain = extract_domain(data) client_sock.sendall(answer(data, domain)) client_sock.close() # Server starts server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind((host, 0)) server_socket.listen(5) server_addr = server_socket.getsockname() thread = Thread(target=serve, args=(server_socket, )) thread.start() yield server_addr # Stop the server client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(server_addr) client.send(death_pill) client.close() thread.join() server_socket.close() eventlet-0.30.2/tests/api_test.py0000644000076500000240000001260414006212666017406 0ustar temotostaff00000000000000import eventlet from eventlet import greenio, hubs, greenthread from eventlet.green import ssl import tests def check_hub(): # Clear through the descriptor queue eventlet.sleep(0) eventlet.sleep(0) hub = hubs.get_hub() for nm in 'get_readers', 'get_writers': dct = getattr(hub, nm)() assert not dct, "hub.%s not empty: %s" % (nm, dct) hub.abort(wait=True) assert not hub.running class TestApi(tests.LimitedTestCase): def test_tcp_listener(self): socket = eventlet.listen(('0.0.0.0', 0)) assert socket.getsockname()[0] == '0.0.0.0' socket.close() check_hub() def test_connect_tcp(self): def accept_once(listenfd): try: conn, addr = listenfd.accept() fd = conn.makefile(mode='wb') conn.close() fd.write(b'hello\n') fd.close() finally: listenfd.close() server = eventlet.listen(('0.0.0.0', 0)) eventlet.spawn_n(accept_once, server) client = eventlet.connect(('127.0.0.1', server.getsockname()[1])) fd = client.makefile('rb') client.close() assert fd.readline() == b'hello\n' assert fd.read() == b'' fd.close() check_hub() @tests.skip_if_no_ssl def test_connect_ssl(self): def accept_once(listenfd): try: conn, addr = listenfd.accept() conn.write(b'hello\r\n') greenio.shutdown_safe(conn) conn.close() finally: greenio.shutdown_safe(listenfd) listenfd.close() server = eventlet.wrap_ssl( eventlet.listen(('0.0.0.0', 0)), tests.private_key_file, tests.certificate_file, server_side=True ) eventlet.spawn_n(accept_once, server) 
raw_client = eventlet.connect(('127.0.0.1', server.getsockname()[1])) client = ssl.wrap_socket(raw_client) fd = client.makefile('rb', 8192) assert fd.readline() == b'hello\r\n' try: self.assertEqual(b'', fd.read(10)) except greenio.SSL.ZeroReturnError: # if it's a GreenSSL object it'll do this pass greenio.shutdown_safe(client) client.close() check_hub() def test_001_trampoline_timeout(self): server_sock = eventlet.listen(('127.0.0.1', 0)) bound_port = server_sock.getsockname()[1] def server(sock): client, addr = sock.accept() eventlet.sleep(0.1) server_evt = eventlet.spawn(server, server_sock) eventlet.sleep(0) try: desc = eventlet.connect(('127.0.0.1', bound_port)) hubs.trampoline(desc, read=True, write=False, timeout=0.001) except eventlet.Timeout: pass # test passed else: assert False, "Didn't timeout" server_evt.wait() check_hub() def test_timeout_cancel(self): server = eventlet.listen(('0.0.0.0', 0)) bound_port = server.getsockname()[1] done = [False] def client_closer(sock): while True: (conn, addr) = sock.accept() conn.close() def go(): desc = eventlet.connect(('127.0.0.1', bound_port)) try: hubs.trampoline(desc, read=True, timeout=0.1) except eventlet.Timeout: assert False, "Timed out" server.close() desc.close() done[0] = True greenthread.spawn_after_local(0, go) server_coro = eventlet.spawn(client_closer, server) while not done[0]: eventlet.sleep(0) eventlet.kill(server_coro) check_hub() def test_killing_dormant(self): DELAY = 0.1 state = [] def test(): try: state.append('start') eventlet.sleep(DELAY) except: state.append('except') # catching GreenletExit pass # when switching to hub, hub makes itself the parent of this greenlet, # thus after the function's done, the control will go to the parent eventlet.sleep(0) state.append('finished') g = eventlet.spawn(test) eventlet.sleep(DELAY / 2) self.assertEqual(state, ['start']) eventlet.kill(g) # will not get there, unless switching is explicitly scheduled by kill self.assertEqual(state, ['start', 'except']) eventlet.sleep(DELAY) self.assertEqual(state, ['start', 'except', 'finished']) def test_nested_with_timeout(self): def func(): return eventlet.with_timeout(0.2, eventlet.sleep, 2, timeout_value=1) try: eventlet.with_timeout(0.1, func) self.fail(u'Expected Timeout') except eventlet.Timeout: pass def test_wrap_is_timeout(): class A(object): pass obj = eventlet.wrap_is_timeout(A)() tests.check_is_timeout(obj) def test_timeouterror_deprecated(): # https://github.com/eventlet/eventlet/issues/378 code = '''import eventlet; eventlet.Timeout(1).cancel(); print('pass')''' args = ['-Werror:eventlet.Timeout:DeprecationWarning', '-c', code] tests.run_python(path=None, args=args, expect_pass=True) eventlet-0.30.2/tests/backdoor_test.py0000644000076500000240000000510614006212666020420 0ustar temotostaff00000000000000import os import os.path import eventlet from eventlet import backdoor from eventlet.green import socket import tests class BackdoorTest(tests.LimitedTestCase): def test_server(self): listener = socket.socket() listener.bind(('localhost', 0)) listener.listen(50) serv = eventlet.spawn(backdoor.backdoor_server, listener) client = socket.socket() client.connect(('localhost', listener.getsockname()[1])) self._run_test_on_client_and_server(client, serv) def _run_test_on_client_and_server(self, client, server_thread): f = client.makefile('rw') assert 'Python' in f.readline() f.readline() # build info f.readline() # help info assert 'InteractiveConsole' in f.readline() self.assertEqual('>>> ', f.read(4)) f.write('print("hi")\n') f.flush() 
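# --- Illustrative aside, not part of the original tests/backdoor_test.py ---
# backdoor.backdoor_server() serves an interactive Python console on any
# listening green socket; the surrounding test drives that console by hand
# (banner, ">>> " prompt, print("hi")). A minimal sketch of starting and
# stopping such a server on an ephemeral localhost port:
def _sketch_backdoor_server():
    import eventlet
    from eventlet import backdoor
    from eventlet.green import socket

    listener = socket.socket()
    listener.bind(('localhost', 0))
    listener.listen(5)
    server = eventlet.spawn(backdoor.backdoor_server, listener)
    # a client could now connect to listener.getsockname() and type Python
    # statements at the ">>> " prompt
    server.kill()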
self.assertEqual('hi\n', f.readline()) self.assertEqual('>>> ', f.read(4)) f.close() client.close() server_thread.kill() # wait for the console to discover that it's dead eventlet.sleep(0.1) @tests.skip_if_no_ipv6 def test_server_on_ipv6_socket(self): listener = socket.socket(socket.AF_INET6) listener.bind(('::', 0)) listener.listen(5) serv = eventlet.spawn(backdoor.backdoor_server, listener) client = socket.socket(socket.AF_INET6) client.connect(listener.getsockname()) self._run_test_on_client_and_server(client, serv) def test_server_on_unix_socket(self): SOCKET_PATH = '/tmp/eventlet_backdoor_test.socket' if os.path.exists(SOCKET_PATH): os.unlink(SOCKET_PATH) listener = socket.socket(socket.AF_UNIX) listener.bind(SOCKET_PATH) listener.listen(5) serv = eventlet.spawn(backdoor.backdoor_server, listener) client = socket.socket(socket.AF_UNIX) client.connect(SOCKET_PATH) self._run_test_on_client_and_server(client, serv) def test_quick_client_disconnect(self): listener = socket.socket() listener.bind(('localhost', 0)) listener.listen(50) serv = eventlet.spawn(backdoor.backdoor_server, listener) client = socket.socket() client.connect(('localhost', listener.getsockname()[1])) client.close() # can still reconnect; server is running client = socket.socket() client.connect(('localhost', listener.getsockname()[1])) client.close() serv.kill() # wait for the console to discover that it's dead eventlet.sleep(0.1) eventlet-0.30.2/tests/convenience_test.py0000644000076500000240000001433414006212666021133 0ustar temotostaff00000000000000import os import warnings import eventlet from eventlet import convenience, debug from eventlet.green import socket import six import tests import tests.mock certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt') private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key') class TestServe(tests.LimitedTestCase): def setUp(self): super(TestServe, self).setUp() debug.hub_exceptions(False) def tearDown(self): super(TestServe, self).tearDown() debug.hub_exceptions(True) def test_exiting_server(self): # tests that the server closes the client sock on handle() exit def closer(sock, addr): pass l = eventlet.listen(('localhost', 0)) gt = eventlet.spawn(eventlet.serve, l, closer) client = eventlet.connect(('localhost', l.getsockname()[1])) client.sendall(b'a') self.assertFalse(client.recv(100)) gt.kill() def test_excepting_server(self): # tests that the server closes the client sock on handle() exception def crasher(sock, addr): sock.recv(1024) 0 // 0 l = eventlet.listen(('localhost', 0)) gt = eventlet.spawn(eventlet.serve, l, crasher) client = eventlet.connect(('localhost', l.getsockname()[1])) client.sendall(b'a') self.assertRaises(ZeroDivisionError, gt.wait) self.assertFalse(client.recv(100)) def test_excepting_server_already_closed(self): # same as above but with explicit clsoe before crash def crasher(sock, addr): sock.recv(1024) sock.close() 0 // 0 l = eventlet.listen(('localhost', 0)) gt = eventlet.spawn(eventlet.serve, l, crasher) client = eventlet.connect(('localhost', l.getsockname()[1])) client.sendall(b'a') self.assertRaises(ZeroDivisionError, gt.wait) self.assertFalse(client.recv(100)) def test_called_for_each_connection(self): hits = [0] def counter(sock, addr): hits[0] += 1 l = eventlet.listen(('localhost', 0)) gt = eventlet.spawn(eventlet.serve, l, counter) for i in six.moves.range(100): client = eventlet.connect(('localhost', l.getsockname()[1])) self.assertFalse(client.recv(100)) gt.kill() self.assertEqual(100, hits[0]) def 
test_blocking(self): l = eventlet.listen(('localhost', 0)) x = eventlet.with_timeout( 0.01, eventlet.serve, l, lambda c, a: None, timeout_value="timeout") self.assertEqual(x, "timeout") def test_raising_stopserve(self): def stopit(conn, addr): raise eventlet.StopServe() l = eventlet.listen(('localhost', 0)) # connect to trigger a call to stopit gt = eventlet.spawn(eventlet.connect, ('localhost', l.getsockname()[1])) eventlet.serve(l, stopit) gt.wait() def test_concurrency(self): evt = eventlet.Event() def waiter(sock, addr): sock.sendall(b'hi') evt.wait() l = eventlet.listen(('localhost', 0)) eventlet.spawn(eventlet.serve, l, waiter, 5) def test_client(): c = eventlet.connect(('localhost', l.getsockname()[1])) # verify the client is connected by getting data self.assertEqual(b'hi', c.recv(2)) return c [test_client() for i in range(5)] # very next client should not get anything x = eventlet.with_timeout( 0.01, test_client, timeout_value="timed out") self.assertEqual(x, "timed out") @tests.skip_if_no_ssl def test_wrap_ssl(self): server = eventlet.wrap_ssl( eventlet.listen(('localhost', 0)), certfile=certificate_file, keyfile=private_key_file, server_side=True) port = server.getsockname()[1] def handle(sock, addr): sock.sendall(sock.recv(1024)) raise eventlet.StopServe() eventlet.spawn(eventlet.serve, server, handle) client = eventlet.wrap_ssl(eventlet.connect(('localhost', port))) client.sendall(b"echo") self.assertEqual(b"echo", client.recv(1024)) def test_socket_reuse(): # pick a free port with bind to 0 - without SO_REUSEPORT # then close it and try to bind to same port with SO_REUSEPORT # loop helps in case something else used the chosen port before second bind addr = None errors = [] for _ in range(5): lsock1 = eventlet.listen(('localhost', 0)) addr = lsock1.getsockname() lsock1.close() try: lsock1 = eventlet.listen(addr) except socket.error as e: errors.append(e) continue break else: assert False, errors if hasattr(socket, 'SO_REUSEPORT'): lsock2 = eventlet.listen(addr) else: try: lsock2 = eventlet.listen(addr) assert lsock2 lsock2.close() except socket.error: pass lsock1.close() def test_reuse_random_port_warning(): with warnings.catch_warnings(record=True) as w: eventlet.listen(('localhost', 0), reuse_port=True).close() assert len(w) == 1 assert issubclass(w[0].category, convenience.ReuseRandomPortWarning) @tests.skip_unless(hasattr(socket, 'SO_REUSEPORT')) def test_reuseport_oserror(): # https://github.com/eventlet/eventlet/issues/380 # https://github.com/eventlet/eventlet/issues/418 err22 = OSError(22, 'Invalid argument') sock1 = eventlet.listen(('localhost', 0)) addr = sock1.getsockname() sock1.close() original_socket_init = socket.socket.__init__ def patched(self, *a, **kw): original_socket_init(self, *a, **kw) self.setsockopt = tests.mock.Mock(side_effect=err22) with warnings.catch_warnings(record=True) as w: try: socket.socket.__init__ = patched eventlet.listen(addr, reuse_addr=False, reuse_port=True).close() finally: socket.socket.__init__ = original_socket_init assert len(w) == 1 assert issubclass(w[0].category, convenience.ReusePortUnavailableWarning) eventlet-0.30.2/tests/dagpool_test.py0000644000076500000240000005515614006212666020273 0ustar temotostaff00000000000000"""\ @file dagpool_test.py @author Nat Goodspeed @date 2016-08-26 @brief Test DAGPool class """ from nose.tools import * import eventlet from eventlet.dagpool import DAGPool, Collision, PropagateError import six from contextlib import contextmanager import itertools # Not all versions of 
nose.tools.assert_raises() support the usage in this # module, but it's straightforward enough to code that explicitly. @contextmanager def assert_raises(exc): """exc is an exception class""" try: yield except exc: pass else: raise AssertionError("failed to raise expected exception {0}" .format(exc.__class__.__name__)) def assert_in(sought, container): assert sought in container, "{0} not in {1}".format(sought, container) # **************************************************************************** # Verify that a given operation returns without suspending # **************************************************************************** # module-scope counter allows us to verify when the main greenthread running # the test does or does not suspend counter = None def incrementer(): """ This function runs as a background greenthread. Every time it regains control, it increments 'counter' and relinquishes control again. The point is that by testing 'counter' before and after a particular operation, a test can determine whether other greenthreads were allowed to run during that operation -- in other words, whether that operation suspended. """ global counter # suspend_checker() initializes counter to 0, so the first time we get # control, set it to 1 for counter in itertools.count(1): eventlet.sleep(0) @contextmanager def suspend_checker(): """ This context manager enables check_no_suspend() support. It runs the incrementer() function as a background greenthread, then kills it off when you exit the block. """ global counter # make counter not None to enable check_no_suspend() counter = 0 coro = eventlet.spawn(incrementer) yield coro.kill() # set counter back to None to disable check_no_suspend() counter = None @contextmanager def check_no_suspend(): """ Within a 'with suspend_checker()' block, use 'with check_no_suspend()' to verify that a particular operation does not suspend the calling greenthread. If it does suspend, incrementer() will have regained control and incremented the global 'counter'. """ global counter # It would be an easy mistake to use check_no_suspend() outside of a # suspend_checker() block. Without the incrementer() greenthread running, # 'counter' will never be incremented, therefore check_no_suspend() will # always be satisfied, possibly masking bugs. assert counter is not None, "Use 'with suspend_checker():' to enable check_no_suspend()" current = counter yield assert counter == current, "Operation suspended {0} times".format(counter - current) def test_check_no_suspend(): with assert_raises(AssertionError): # We WANT this to raise AssertionError because it's outside of a # suspend_checker() block -- that is, we have no incrementer() # greenthread. with check_no_suspend(): pass # Here we use check_no_suspend() the right way, inside 'with # suspend_checker()'. Does it really do what we claim it should? with suspend_checker(): with assert_raises(AssertionError): with check_no_suspend(): # suspend, so we know if check_no_suspend() asserts eventlet.sleep(0) # **************************************************************************** # Verify that the expected things happened in the expected order # **************************************************************************** class Capture(object): """ This class is intended to capture a sequence (of string messages) to verify that all expected events occurred, and in the expected order. The tricky part is that certain subsequences can occur in arbitrary order and still be correct. 
Specifically, when posting a particular value to a DAGPool instance unblocks several waiting greenthreads, it is indeterminate which greenthread will first receive the new value. Similarly, when several values for which a particular greenthread is waiting become available at (effectively) the same time, it is indeterminate in which order they will be delivered. This is addressed by building a list of sets. Each set contains messages that can occur in indeterminate order, therefore comparing that set to any other ordering of the same messages should succeed. However, it's important that each set of messages that occur 'at the same time' should itself be properly sequenced with respect to all other such sets. """ def __init__(self): self.sequence = [set()] def add(self, message): self.sequence[-1].add(message) def step(self): self.sequence.append(set()) def validate(self, sequence): # Let caller pass any sequence of grouped items. For comparison # purposes, turn them into the specific form we store: a list of sets. setlist = [] for subseq in sequence: if isinstance(subseq, six.string_types): # If this item is a plain string (which Python regards as an # iterable of characters) rather than a list or tuple or set # of strings, treat it as atomic. Make a set containing only # that string. setlist.append(set([subseq])) else: try: iter(subseq) except TypeError: # subseq is a scalar of some other kind. Make a set # containing only that item. setlist.append(set([subseq])) else: # subseq is, as we expect, an iterable -- possibly already # a set. Make a set containing its elements. setlist.append(set(subseq)) # Now that we've massaged 'sequence' into 'setlist', compare. assert_equal(self.sequence, setlist) # **************************************************************************** # Canonical DAGPool greenthread function # **************************************************************************** def observe(key, results, capture, event): for k, v in results: capture.add("{0} got {1}".format(key, k)) result = event.wait() capture.add("{0} returning {1}".format(key, result)) return result # **************************************************************************** # DAGPool test functions # **************************************************************************** def test_init(): with suspend_checker(): # no preload data, just so we know it doesn't blow up pool = DAGPool() # preload dict pool = DAGPool(dict(a=1, b=2, c=3)) # this must not hang with check_no_suspend(): results = pool.waitall() # with no spawn() or post(), waitall() returns preload data assert_equals(results, dict(a=1, b=2, c=3)) # preload sequence of pairs pool = DAGPool([("d", 4), ("e", 5), ("f", 6)]) # this must not hang with check_no_suspend(): results = pool.waitall() assert_equals(results, dict(d=4, e=5, f=6)) def test_wait_each_empty(): pool = DAGPool() with suspend_checker(): with check_no_suspend(): for k, v in pool.wait_each(()): # shouldn't yield anything raise AssertionError("empty wait_each() returned ({0}, {1})".format(k, v)) def test_wait_each_preload(): pool = DAGPool(dict(a=1, b=2, c=3)) with suspend_checker(): with check_no_suspend(): # wait_each() may deliver in arbitrary order; collect into a dict # for comparison assert_equals(dict(pool.wait_each("abc")), dict(a=1, b=2, c=3)) # while we're at it, test wait() for preloaded keys assert_equals(pool.wait("bc"), dict(b=2, c=3)) def post_each(pool, capture): # distinguish the results wait_each() can retrieve immediately from those # it must wait for us to post() 
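# --- Illustrative aside, not part of the original tests/dagpool_test.py ---
# The tests below exercise DAGPool's core operations: preload values, post()
# new ones, spawn() keyed green threads with declared dependencies, and read
# results back with pool[key] / wait() / waitall(). A minimal end-to-end
# sketch (keys and values are arbitrary examples):
def _sketch_dagpool_basics():
    from eventlet.dagpool import DAGPool

    pool = DAGPool(dict(a=1))              # preloaded result for key "a"
    pool.post('b', 2)                      # post another result directly

    def add_deps(key, results):
        # results yields (dependency_key, value) pairs as they become ready
        return sum(value for _key, value in results)

    pool.spawn('c', ('a', 'b'), add_deps)  # "c" depends on "a" and "b";
                                           # its return value is posted as "c"
    assert pool['c'] == 3                  # pool[key] waits for the value
    assert pool.waitall() == dict(a=1, b=2, c=3)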
eventlet.sleep(0) capture.step() pool.post('g', 'gval') pool.post('f', 'fval') eventlet.sleep(0) capture.step() pool.post('e', 'eval') pool.post('d', 'dval') def test_wait_each_posted(): capture = Capture() pool = DAGPool(dict(a=1, b=2, c=3)) eventlet.spawn(post_each, pool, capture) # use a string as a convenient iterable of single-letter keys for k, v in pool.wait_each("bcdefg"): capture.add("got ({0}, {1})".format(k, v)) capture.validate([ ["got (b, 2)", "got (c, 3)"], ["got (f, fval)", "got (g, gval)"], ["got (d, dval)", "got (e, eval)"], ]) def test_wait_posted(): # same as test_wait_each_posted(), but calling wait() capture = Capture() pool = DAGPool(dict(a=1, b=2, c=3)) eventlet.spawn(post_each, pool, capture) gotten = pool.wait("bcdefg") capture.add("got all") assert_equals(gotten, dict(b=2, c=3, d="dval", e="eval", f="fval", g="gval")) capture.validate([ [], [], ["got all"], ]) def test_spawn_collision_preload(): pool = DAGPool([("a", 1)]) with assert_raises(Collision): pool.spawn("a", (), lambda key, results: None) def test_spawn_collision_post(): pool = DAGPool() pool.post("a", "aval") with assert_raises(Collision): pool.spawn("a", (), lambda key, results: None) def test_spawn_collision_spawn(): pool = DAGPool() pool.spawn("a", (), lambda key, results: "aval") # hasn't yet even started assert_equals(pool.get("a"), None) with assert_raises(Collision): # Attempting to spawn again with same key should collide even if the # first spawned greenthread hasn't yet had a chance to run. pool.spawn("a", (), lambda key, results: "bad") # now let the spawned eventlet run eventlet.sleep(0) # should have finished assert_equals(pool.get("a"), "aval") with assert_raises(Collision): # Attempting to spawn with same key collides even when the greenthread # has completed. pool.spawn("a", (), lambda key, results: "badagain") def spin(): # Let all pending greenthreads run until they're blocked for x in range(10): eventlet.sleep(0) def test_spawn_multiple(): capture = Capture() pool = DAGPool(dict(a=1, b=2, c=3)) events = {} for k in "defg": events[k] = eventlet.event.Event() pool.spawn(k, (), observe, capture, events[k]) # Now for a greenthread that depends on ALL the above. 
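# --- Illustrative aside, not part of the original tests/dagpool_test.py ---
# These tests coordinate green threads with eventlet.event.Event: wait()
# blocks until some other green thread calls send(value), and then returns
# that value. A minimal example (the payload string is arbitrary):
def _sketch_event_send_wait():
    import eventlet

    evt = eventlet.event.Event()

    def producer():
        eventlet.sleep(0)        # stand-in for real work
        evt.send('ready')        # wakes every waiter with this value

    eventlet.spawn_n(producer)
    assert evt.wait() == 'ready'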
events["h"] = eventlet.event.Event() # trigger the last event right away: we only care about dependencies events["h"].send("hval") pool.spawn("h", "bcdefg", observe, capture, events["h"]) # let all the spawned greenthreads get as far as they can spin() capture.step() # but none of them has yet produced a result for k in "defgh": assert_equals(pool.get(k), None) assert_equals(set(pool.keys()), set("abc")) assert_equals(dict(pool.items()), dict(a=1, b=2, c=3)) assert_equals(pool.running(), 5) assert_equals(set(pool.running_keys()), set("defgh")) assert_equals(pool.waiting(), 1) assert_equals(pool.waiting_for(), dict(h=set("defg"))) assert_equals(pool.waiting_for("d"), set()) assert_equals(pool.waiting_for("c"), set()) with assert_raises(KeyError): pool.waiting_for("j") assert_equals(pool.waiting_for("h"), set("defg")) # let one of the upstream greenthreads complete events["f"].send("fval") spin() capture.step() assert_equals(pool.get("f"), "fval") assert_equals(set(pool.keys()), set("abcf")) assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, f="fval")) assert_equals(pool.running(), 4) assert_equals(set(pool.running_keys()), set("degh")) assert_equals(pool.waiting(), 1) assert_equals(pool.waiting_for("h"), set("deg")) # now two others events["e"].send("eval") events["g"].send("gval") spin() capture.step() assert_equals(pool.get("e"), "eval") assert_equals(pool.get("g"), "gval") assert_equals(set(pool.keys()), set("abcefg")) assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, e="eval", f="fval", g="gval")) assert_equals(pool.running(), 2) assert_equals(set(pool.running_keys()), set("dh")) assert_equals(pool.waiting(), 1) assert_equals(pool.waiting_for("h"), set("d")) # last one events["d"].send("dval") # make sure both pool greenthreads get a chance to run spin() capture.step() assert_equals(pool.get("d"), "dval") assert_equals(set(pool.keys()), set("abcdefgh")) assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, d="dval", e="eval", f="fval", g="gval", h="hval")) assert_equals(pool.running(), 0) assert_false(pool.running_keys()) assert_equals(pool.waiting(), 0) assert_equals(pool.waiting_for("h"), set()) capture.validate([ ["h got b", "h got c"], ["f returning fval", "h got f"], ["e returning eval", "g returning gval", "h got e", "h got g"], ["d returning dval", "h got d", "h returning hval"], [], ]) def spawn_many_func(key, results, capture, pool): for k, v in results: # with a capture.step() at each post(), too complicated to predict # which results will be delivered when pass capture.add("{0} done".format(key)) # use post(key) instead of waiting for implicit post() of return value pool.post(key, key) capture.step() spin() def waitall_done(capture, pool): pool.waitall() capture.add("waitall() done") def test_spawn_many(): # This dependencies dict sets up a graph like this: # a # / \ # b c # \ /| # d | # \| # e deps = dict(e="cd", d="bc", c="a", b="a", a="") capture = Capture() pool = DAGPool() # spawn a waitall() waiter externally to our DAGPool, but capture its # message in same Capture instance eventlet.spawn(waitall_done, capture, pool) pool.spawn_many(deps, spawn_many_func, capture, pool) # This set of greenthreads should in fact run to completion once spawned. spin() # verify that e completed (also that post(key) within greenthread # overrides implicit post of return value, which would be None) assert_equals(pool.get("e"), "e") # With the dependency graph shown above, it is not guaranteed whether b or # c will complete first. Handle either case. 
sequence = capture.sequence[:] sequence[1:3] = [set([sequence[1].pop(), sequence[2].pop()])] assert_equals(sequence, [set(["a done"]), set(["b done", "c done"]), set(["d done"]), set(["e done"]), set(["waitall() done"]), ]) # deliberately distinguish this from dagpool._MISSING _notthere = object() def test_wait_each_all(): # set up a simple linear dependency chain deps = dict(b="a", c="b", d="c", e="d") capture = Capture() pool = DAGPool([("a", "a")]) # capture a different Event for each key events = dict((key, eventlet.event.Event()) for key in six.iterkeys(deps)) # can't use spawn_many() because we need a different event for each for key, dep in six.iteritems(deps): pool.spawn(key, dep, observe, capture, events[key]) keys = "abcde" # this specific order each = iter(pool.wait_each()) for pos in range(len(keys)): # next value from wait_each() k, v = next(each) assert_equals(k, keys[pos]) # advance every pool greenlet as far as it can go spin() # everything from keys[:pos+1] should have a value by now for k in keys[:pos + 1]: assert pool.get(k, _notthere) is not _notthere, \ "greenlet {0} did not yet produce a value".format(k) # everything from keys[pos+1:] should not yet for k in keys[pos + 1:]: assert pool.get(k, _notthere) is _notthere, \ "wait_each() delayed value for {0}".format(keys[pos]) # let next greenthread complete if pos < len(keys) - 1: k = keys[pos + 1] events[k].send(k) def test_kill(): pool = DAGPool() # nonexistent key raises KeyError with assert_raises(KeyError): pool.kill("a") # spawn a greenthread pool.spawn("a", (), lambda key, result: 1) # kill it before it can even run pool.kill("a") # didn't run spin() assert_equals(pool.get("a"), None) # killing it forgets about it with assert_raises(KeyError): pool.kill("a") # so that we can try again pool.spawn("a", (), lambda key, result: 2) spin() # this time it ran to completion, so can no longer be killed with assert_raises(KeyError): pool.kill("a") # verify it ran to completion assert_equals(pool.get("a"), 2) def test_post_collision_preload(): pool = DAGPool(dict(a=1)) with assert_raises(Collision): pool.post("a", 2) def test_post_collision_post(): pool = DAGPool() pool.post("a", 1) with assert_raises(Collision): pool.post("a", 2) def test_post_collision_spawn(): pool = DAGPool() pool.spawn("a", (), lambda key, result: 1) # hasn't yet run with assert_raises(Collision): # n.b. This exercises the code that tests whether post(key) is or is # not coming from that key's greenthread. 
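# --- Illustrative aside, not part of the original tests/dagpool_test.py ---
# post() refuses to overwrite an existing value for a key and raises
# Collision, unless replace=True is passed (test_post_replace below covers
# the same behaviour). Minimal demonstration:
def _sketch_post_collision_and_replace():
    from eventlet.dagpool import DAGPool, Collision

    pool = DAGPool()
    pool.post('x', 1)
    try:
        pool.post('x', 2)             # same key, no replace -> Collision
    except Collision:
        pass
    pool.post('x', 2, replace=True)   # explicit overwrite is allowed
    assert pool.get('x') == 2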
pool.post("a", 2) # kill it pool.kill("a") # now we can post pool.post("a", 3) assert_equals(pool.get("a"), 3) pool = DAGPool() pool.spawn("a", (), lambda key, result: 4) # run it spin() with assert_raises(Collision): pool.post("a", 5) # can't kill it now either with assert_raises(KeyError): pool.kill("a") # still can't post with assert_raises(Collision): pool.post("a", 6) def test_post_replace(): pool = DAGPool() pool.post("a", 1) pool.post("a", 2, replace=True) assert_equals(pool.get("a"), 2) assert_equals(dict(pool.wait_each("a")), dict(a=2)) assert_equals(pool.wait("a"), dict(a=2)) assert_equals(pool["a"], 2) def waitfor(capture, pool, key): value = pool[key] capture.add("got {0}".format(value)) def test_getitem(): capture = Capture() pool = DAGPool() eventlet.spawn(waitfor, capture, pool, "a") # pool["a"] just waiting capture.validate([[]]) pool.spawn("a", (), lambda key, results: 1) # still waiting: hasn't yet run capture.validate([[]]) # run it spin() capture.validate([["got 1"]]) class BogusError(Exception): pass def raiser(key, results, exc): raise exc def consumer(key, results): for k, v in results: pass return True def test_waitall_exc(): pool = DAGPool() pool.spawn("a", (), raiser, BogusError("bogus")) try: pool.waitall() except PropagateError as err: assert_equals(err.key, "a") assert isinstance(err.exc, BogusError), \ "exc attribute is {0}, not BogusError".format(err.exc) assert_equals(str(err.exc), "bogus") msg = str(err) assert_in("PropagateError(a)", msg) assert_in("BogusError", msg) assert_in("bogus", msg) def test_propagate_exc(): pool = DAGPool() pool.spawn("a", (), raiser, BogusError("bogus")) pool.spawn("b", "a", consumer) pool.spawn("c", "b", consumer) try: pool["c"] except PropagateError as errc: assert_equals(errc.key, "c") errb = errc.exc assert_equals(errb.key, "b") erra = errb.exc assert_equals(erra.key, "a") assert isinstance(erra.exc, BogusError), \ "exc attribute is {0}, not BogusError".format(erra.exc) assert_equals(str(erra.exc), "bogus") msg = str(errc) assert_in("PropagateError(a)", msg) assert_in("PropagateError(b)", msg) assert_in("PropagateError(c)", msg) assert_in("BogusError", msg) assert_in("bogus", msg) def test_wait_each_exc(): pool = DAGPool() pool.spawn("a", (), raiser, BogusError("bogus")) with assert_raises(PropagateError): for k, v in pool.wait_each("a"): pass with assert_raises(PropagateError): for k, v in pool.wait_each(): pass def test_post_get_exc(): pool = DAGPool() bogua = BogusError("bogua") pool.post("a", bogua) assert isinstance(pool.get("a"), BogusError), \ "should have delivered BogusError instead of raising" bogub = PropagateError("b", BogusError("bogub")) pool.post("b", bogub) with assert_raises(PropagateError): pool.get("b") # Notice that although we have both "a" and "b" keys, items() is # guaranteed to raise PropagateError because one of them is # PropagateError. Other values don't matter. with assert_raises(PropagateError): pool.items() # Similar remarks about waitall() and wait(). with assert_raises(PropagateError): pool.waitall() with assert_raises(PropagateError): pool.wait() with assert_raises(PropagateError): pool.wait("b") with assert_raises(PropagateError): pool.wait("ab") # but if we're only wait()ing for success results, no exception assert isinstance(pool.wait("a")["a"], BogusError), \ "should have delivered BogusError instead of raising" # wait_each() is guaranteed to eventually raise PropagateError, though you # may obtain valid values before you hit it. 
with assert_raises(PropagateError): for k, v in pool.wait_each(): pass # wait_each_success() filters assert_equals(dict(pool.wait_each_success()), dict(a=bogua)) assert_equals(dict(pool.wait_each_success("ab")), dict(a=bogua)) assert_equals(dict(pool.wait_each_success("a")), dict(a=bogua)) assert_equals(dict(pool.wait_each_success("b")), {}) # wait_each_exception() filters the other way assert_equals(dict(pool.wait_each_exception()), dict(b=bogub)) assert_equals(dict(pool.wait_each_exception("ab")), dict(b=bogub)) assert_equals(dict(pool.wait_each_exception("a")), {}) assert_equals(dict(pool.wait_each_exception("b")), dict(b=bogub)) eventlet-0.30.2/tests/db_pool_test.py0000644000076500000240000004313214006212666020253 0ustar temotostaff00000000000000from __future__ import print_function import os import sys import traceback from eventlet import db_pool import six import eventlet import eventlet.tpool import tests import tests.mock psycopg2 = None try: import psycopg2 import psycopg2.extensions except ImportError: pass MySQLdb = None try: import MySQLdb except ImportError: pass class DBTester(object): __test__ = False # so that nose doesn't try to execute this directly def setUp(self): self.create_db() self.connection = None connection = self._dbmodule.connect(**self._auth) cursor = connection.cursor() cursor.execute("""CREATE TABLE gargleblatz ( a INTEGER );""") connection.commit() cursor.close() connection.close() def tearDown(self): if self.connection: self.connection.close() self.drop_db() def set_up_dummy_table(self, connection=None): close_connection = False if connection is None: close_connection = True if self.connection is None: connection = self._dbmodule.connect(**self._auth) else: connection = self.connection cursor = connection.cursor() cursor.execute(self.dummy_table_sql) connection.commit() cursor.close() if close_connection: connection.close() # silly mock class class Mock(object): pass class DBConnectionPool(DBTester): __test__ = False # so that nose doesn't try to execute this directly def setUp(self): super(DBConnectionPool, self).setUp() self.pool = self.create_pool() self.connection = self.pool.get() def tearDown(self): if self.connection: self.pool.put(self.connection) self.pool.clear() super(DBConnectionPool, self).tearDown() def assert_cursor_works(self, cursor): cursor.execute("select 1") rows = cursor.fetchall() assert rows def test_connecting(self): assert self.connection is not None def test_create_cursor(self): cursor = self.connection.cursor() cursor.close() def test_run_query(self): cursor = self.connection.cursor() self.assert_cursor_works(cursor) cursor.close() def test_run_bad_query(self): cursor = self.connection.cursor() try: cursor.execute("garbage blah blah") assert False except AssertionError: raise except Exception: pass cursor.close() def test_put_none(self): # the pool is of size 1, and its only connection is out assert self.pool.free() == 0 self.pool.put(None) # ha ha we fooled it into thinking that we had a dead process assert self.pool.free() == 1 conn2 = self.pool.get() assert conn2 is not None assert conn2.cursor self.pool.put(conn2) def test_close_does_a_put(self): assert self.pool.free() == 0 self.connection.close() assert self.pool.free() == 1 self.assertRaises(AttributeError, self.connection.cursor) def test_put_doesnt_double_wrap(self): self.pool.put(self.connection) conn = self.pool.get() assert not isinstance(conn._base, db_pool.PooledConnectionWrapper) self.pool.put(conn) def test_bool(self): assert self.connection 
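# --- Illustrative aside, not part of the original tests/db_pool_test.py ---
# The pools under test hand out wrapped DB-API connections: get() checks one
# out, put() (or closing the wrapper) returns it, and item() does both as a
# context manager. A minimal sketch using the DummyDBModule defined further
# down in this file, so no real database is required:
def _sketch_db_pool_usage():
    from eventlet import db_pool

    pool = db_pool.RawConnectionPool(DummyDBModule(), min_size=0, max_size=2)
    conn = pool.get()                 # check a connection out
    pool.put(conn)                    # hand it back to the pool
    with pool.item() as conn:         # the same round trip as a context manager
        pass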
self.connection.close() assert not self.connection def fill_up_table(self, conn): curs = conn.cursor() for i in six.moves.range(1000): curs.execute('insert into test_table (value_int) values (%s)' % i) conn.commit() def test_returns_immediately(self): self.pool = self.create_pool() conn = self.pool.get() self.set_up_dummy_table(conn) self.fill_up_table(conn) curs = conn.cursor() results = [] SHORT_QUERY = "select * from test_table" evt = eventlet.Event() def a_query(): self.assert_cursor_works(curs) curs.execute(SHORT_QUERY) results.append(2) evt.send() eventlet.spawn(a_query) results.append(1) self.assertEqual([1], results) evt.wait() self.assertEqual([1, 2], results) self.pool.put(conn) def test_connection_is_clean_after_put(self): self.pool = self.create_pool() conn = self.pool.get() self.set_up_dummy_table(conn) curs = conn.cursor() for i in range(10): curs.execute('insert into test_table (value_int) values (%s)' % i) # do not commit :-) self.pool.put(conn) del conn conn2 = self.pool.get() curs2 = conn2.cursor() for i in range(10): curs2.execute('insert into test_table (value_int) values (%s)' % i) conn2.commit() curs2.execute("select * from test_table") # we should have only inserted them once self.assertEqual(10, curs2.rowcount) self.pool.put(conn2) def test_visibility_from_other_connections(self): self.pool = self.create_pool(max_size=3) conn = self.pool.get() conn2 = self.pool.get() curs = conn.cursor() try: curs2 = conn2.cursor() curs2.execute("insert into gargleblatz (a) values (%s)" % (314159)) self.assertEqual(curs2.rowcount, 1) conn2.commit() selection_query = "select * from gargleblatz" curs2.execute(selection_query) self.assertEqual(curs2.rowcount, 1) del curs2 self.pool.put(conn2) # create a new connection, it should see the addition conn3 = self.pool.get() curs3 = conn3.cursor() curs3.execute(selection_query) self.assertEqual(curs3.rowcount, 1) # now, does the already-open connection see it? 
curs.execute(selection_query) self.assertEqual(curs.rowcount, 1) self.pool.put(conn3) finally: # clean up my litter curs.execute("delete from gargleblatz where a=314159") conn.commit() self.pool.put(conn) def test_clear(self): self.pool = self.create_pool() self.pool.put(self.connection) self.pool.clear() self.assertEqual(len(self.pool.free_items), 0) def test_clear_warmup(self): """Clear implicitly created connections (min_size > 0)""" self.pool = self.create_pool(min_size=1) self.pool.clear() self.assertEqual(len(self.pool.free_items), 0) def test_unwrap_connection(self): self.assert_(isinstance(self.connection, db_pool.GenericConnectionWrapper)) conn = self.pool._unwrap_connection(self.connection) assert not isinstance(conn, db_pool.GenericConnectionWrapper) self.assertEqual(None, self.pool._unwrap_connection(None)) self.assertEqual(None, self.pool._unwrap_connection(1)) # testing duck typing here -- as long as the connection has a # _base attribute, it should be unwrappable x = Mock() x._base = 'hi' self.assertEqual('hi', self.pool._unwrap_connection(x)) conn.close() def test_safe_close(self): self.pool._safe_close(self.connection, quiet=True) self.assertEqual(len(self.pool.free_items), 1) self.pool._safe_close(None) self.pool._safe_close(1) # now we're really going for 100% coverage x = Mock() def fail(): raise KeyboardInterrupt() x.close = fail self.assertRaises(KeyboardInterrupt, self.pool._safe_close, x) x = Mock() def fail2(): raise RuntimeError("if this line has been printed, the test succeeded") x.close = fail2 self.pool._safe_close(x, quiet=False) def test_zero_max_idle(self): self.pool.put(self.connection) self.pool.clear() self.pool = self.create_pool(max_size=2, max_idle=0) self.connection = self.pool.get() self.connection.close() self.assertEqual(len(self.pool.free_items), 0) def test_zero_max_age(self): self.pool.put(self.connection) self.pool.clear() self.pool = self.create_pool(max_size=2, max_age=0) self.connection = self.pool.get() self.connection.close() self.assertEqual(len(self.pool.free_items), 0) def test_waiters_get_woken(self): # verify that when there's someone waiting on an empty pool # and someone puts an immediately-closed connection back in # the pool that the waiter gets woken self.pool.put(self.connection) self.pool.clear() self.pool = self.create_pool(max_size=1, max_age=0) self.connection = self.pool.get() self.assertEqual(self.pool.free(), 0) self.assertEqual(self.pool.waiting(), 0) e = eventlet.Event() def retrieve(pool, ev): c = pool.get() ev.send(c) eventlet.spawn(retrieve, self.pool, e) eventlet.sleep(0) # these two sleeps should advance the retrieve eventlet.sleep(0) # coroutine until it's waiting in get() self.assertEqual(self.pool.free(), 0) self.assertEqual(self.pool.waiting(), 1) self.pool.put(self.connection) timer = eventlet.Timeout(1) conn = e.wait() timer.cancel() self.assertEqual(self.pool.free(), 0) self.assertEqual(self.pool.waiting(), 0) self.pool.put(conn) def test_raising_create(self): # if the create() method raises an exception the pool should # not lose any connections self.pool = self.create_pool(max_size=1, module=RaisingDBModule()) self.assertRaises(RuntimeError, self.pool.get) self.assertEqual(self.pool.free(), 1) class DummyConnection(object): def rollback(self): pass class DummyDBModule(object): def connect(self, *args, **kwargs): return DummyConnection() class RaisingDBModule(object): def connect(self, *args, **kw): raise RuntimeError() class TpoolConnectionPool(DBConnectionPool): __test__ = False # so that nose doesn't try 
to execute this directly def create_pool(self, min_size=0, max_size=1, max_idle=10, max_age=10, connect_timeout=0.5, module=None): if module is None: module = self._dbmodule return db_pool.TpooledConnectionPool( module, min_size=min_size, max_size=max_size, max_idle=max_idle, max_age=max_age, connect_timeout=connect_timeout, **self._auth) @tests.skip_with_pyevent def setUp(self): super(TpoolConnectionPool, self).setUp() def tearDown(self): super(TpoolConnectionPool, self).tearDown() eventlet.tpool.killall() class RawConnectionPool(DBConnectionPool): __test__ = False # so that nose doesn't try to execute this directly def create_pool(self, min_size=0, max_size=1, max_idle=10, max_age=10, connect_timeout=0.5, module=None): if module is None: module = self._dbmodule return db_pool.RawConnectionPool( module, min_size=min_size, max_size=max_size, max_idle=max_idle, max_age=max_age, connect_timeout=connect_timeout, **self._auth) def test_raw_pool_issue_125(): # pool = self.create_pool(min_size=3, max_size=5) pool = db_pool.RawConnectionPool( DummyDBModule(), dsn="dbname=test user=jessica port=5433", min_size=3, max_size=5) conn = pool.get() pool.put(conn) def test_raw_pool_custom_cleanup_ok(): cleanup_mock = tests.mock.Mock() pool = db_pool.RawConnectionPool(DummyDBModule(), cleanup=cleanup_mock) conn = pool.get() pool.put(conn) assert cleanup_mock.call_count == 1 with pool.item() as conn: pass assert cleanup_mock.call_count == 2 def test_raw_pool_custom_cleanup_arg_error(): cleanup_mock = tests.mock.Mock(side_effect=NotImplementedError) pool = db_pool.RawConnectionPool(DummyDBModule()) conn = pool.get() pool.put(conn, cleanup=cleanup_mock) assert cleanup_mock.call_count == 1 with pool.item(cleanup=cleanup_mock): pass assert cleanup_mock.call_count == 2 def test_raw_pool_custom_cleanup_fatal(): state = [0] def cleanup(conn): state[0] += 1 raise KeyboardInterrupt pool = db_pool.RawConnectionPool(DummyDBModule(), cleanup=cleanup) conn = pool.get() try: pool.put(conn) except KeyboardInterrupt: pass else: assert False, 'Expected KeyboardInterrupt' assert state[0] == 1 def test_raw_pool_clear_update_current_size(): # https://github.com/eventlet/eventlet/issues/139 # BaseConnectionPool.clear does not update .current_size. # That leads to situation when new connections could not be created. 
pool = db_pool.RawConnectionPool(DummyDBModule()) pool.get().close() assert pool.current_size == 1 assert len(pool.free_items) == 1 pool.clear() assert pool.current_size == 0 assert len(pool.free_items) == 0 def mysql_requirement(_f): verbose = os.environ.get('eventlet_test_mysql_verbose') if MySQLdb is None: if verbose: print(">> Skipping mysql tests, MySQLdb not importable", file=sys.stderr) return False try: auth = tests.get_database_auth()['MySQLdb'].copy() MySQLdb.connect(**auth) return True except MySQLdb.OperationalError: if verbose: print(">> Skipping mysql tests, error when connecting:", file=sys.stderr) traceback.print_exc() return False class MysqlConnectionPool(object): dummy_table_sql = """CREATE TEMPORARY TABLE test_table ( row_id INTEGER PRIMARY KEY AUTO_INCREMENT, value_int INTEGER, value_float FLOAT, value_string VARCHAR(200), value_uuid CHAR(36), value_binary BLOB, value_binary_string VARCHAR(200) BINARY, value_enum ENUM('Y','N'), created TIMESTAMP ) ENGINE=InnoDB;""" @tests.skip_unless(mysql_requirement) def setUp(self): self._dbmodule = MySQLdb self._auth = tests.get_database_auth()['MySQLdb'] super(MysqlConnectionPool, self).setUp() def tearDown(self): super(MysqlConnectionPool, self).tearDown() def create_db(self): auth = self._auth.copy() try: self.drop_db() except Exception: pass dbname = 'test%s' % os.getpid() db = self._dbmodule.connect(**auth).cursor() db.execute("create database " + dbname) db.close() self._auth['db'] = dbname del db def drop_db(self): db = self._dbmodule.connect(**self._auth).cursor() db.execute("drop database " + self._auth['db']) db.close() del db class Test01MysqlTpool(MysqlConnectionPool, TpoolConnectionPool, tests.LimitedTestCase): __test__ = True class Test02MysqlRaw(MysqlConnectionPool, RawConnectionPool, tests.LimitedTestCase): __test__ = True def postgres_requirement(_f): if psycopg2 is None: print("Skipping postgres tests, psycopg2 not importable") return False try: auth = tests.get_database_auth()['psycopg2'].copy() psycopg2.connect(**auth) return True except psycopg2.OperationalError: print("Skipping postgres tests, error when connecting") return False class Psycopg2ConnectionPool(object): dummy_table_sql = """CREATE TEMPORARY TABLE test_table ( row_id SERIAL PRIMARY KEY, value_int INTEGER, value_float FLOAT, value_string VARCHAR(200), value_uuid CHAR(36), value_binary BYTEA, value_binary_string BYTEA, created TIMESTAMP );""" @tests.skip_unless(postgres_requirement) def setUp(self): self._dbmodule = psycopg2 self._auth = tests.get_database_auth()['psycopg2'] super(Psycopg2ConnectionPool, self).setUp() def tearDown(self): super(Psycopg2ConnectionPool, self).tearDown() def create_db(self): dbname = 'test%s' % os.getpid() self._auth['database'] = dbname try: self.drop_db() except Exception: pass auth = self._auth.copy() auth.pop('database') # can't create if you're connecting to it conn = self._dbmodule.connect(**auth) conn.set_isolation_level(0) db = conn.cursor() db.execute("create database " + dbname) db.close() conn.close() def drop_db(self): auth = self._auth.copy() auth.pop('database') # can't drop database we connected to conn = self._dbmodule.connect(**auth) conn.set_isolation_level(0) db = conn.cursor() db.execute("drop database " + self._auth['database']) db.close() conn.close() class TestPsycopg2Base(tests.LimitedTestCase): __test__ = False def test_cursor_works_as_context_manager(self): with self.connection.cursor() as c: c.execute('select 1') row = c.fetchone() assert row == (1,) def test_set_isolation_level(self): 
self.connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) class Test01Psycopg2Tpool(Psycopg2ConnectionPool, TpoolConnectionPool, TestPsycopg2Base): __test__ = True class Test02Psycopg2Raw(Psycopg2ConnectionPool, RawConnectionPool, TestPsycopg2Base): __test__ = True eventlet-0.30.2/tests/debug_test.py0000644000076500000240000001034614006212666017724 0ustar temotostaff00000000000000import sys from eventlet import debug import six import tests import eventlet class TestSpew(tests.LimitedTestCase): def setUp(self): self.orig_trace = sys.settrace sys.settrace = self._settrace self.tracer = None def tearDown(self): sys.settrace = self.orig_trace sys.stdout = sys.__stdout__ def _settrace(self, cb): self.tracer = cb def test_spew(self): debug.spew() assert isinstance(self.tracer, debug.Spew) def test_unspew(self): debug.spew() debug.unspew() assert self.tracer is None def test_line(self): if sys.version_info >= (3, 7): frame_str = "f== (3, 7): frame_str = "f== delay def test_wait_timeout_exceed(): evt = eventlet.Event() delay = 0.1 eventlet.spawn_after(delay * 2, evt.send, True) t1 = eventlet.hubs.get_hub().clock() with eventlet.Timeout(delay, False): result = evt.wait(timeout=delay) td = eventlet.hubs.get_hub().clock() - t1 assert not result assert td >= delay eventlet-0.30.2/tests/green_http_test.py0000644000076500000240000000145414006212666020775 0ustar temotostaff00000000000000import eventlet import six import tests __test__ = six.PY3 def test_green_http_doesnt_change_original_module(): tests.run_isolated('green_http_doesnt_change_original_module.py') def test_green_httplib_doesnt_change_original_module(): tests.run_isolated('green_httplib_doesnt_change_original_module.py') def test_http_request_encode_chunked_kwarg(): # https://bugs.python.org/issue12319 # As of 2017-01 this test only verifies encode_chunked kwarg is properly accepted. # Stdlib http.client code was copied partially, chunked encoding may not work. from eventlet.green.http import client server_sock = eventlet.listen(('127.0.0.1', 0)) addr = server_sock.getsockname() h = client.HTTPConnection(host=addr[0], port=addr[1]) h.request('GET', '/', encode_chunked=True) eventlet-0.30.2/tests/green_profile_test.py0000644000076500000240000000041114006212666021446 0ustar temotostaff00000000000000import eventlet from eventlet.green import profile import tests def test_green_profile_basic(): statement = 'eventlet.sleep()' result = profile.Profile().runctx(statement, {'eventlet': eventlet}, {}) assert ('profile', 0, statement) in result.timings eventlet-0.30.2/tests/green_select_test.py0000644000076500000240000000143214006212666021271 0ustar temotostaff00000000000000import eventlet from eventlet import hubs from eventlet.green import select import tests original_socket = eventlet.patcher.original('socket') def test_select_mark_file_as_reopened(): # https://github.com/eventlet/eventlet/pull/294 # Fix API inconsistency in select and Hub. # mark_as_closed takes one argument, but called without arguments. # on_error takes file descriptor, but called with an exception object. 
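# --- Illustrative aside, not part of the original tests/green_select_test.py ---
# eventlet.green.select mirrors the stdlib select module but yields to the hub
# instead of blocking the whole process. A minimal sketch, assuming that a
# listening socket with no pending connection simply times out and that the
# short timeout value is arbitrary:
def _sketch_green_select_timeout():
    from eventlet.green import select, socket

    listener = socket.socket()
    listener.bind(('127.0.0.1', 0))
    listener.listen(1)
    # nothing is connecting, so after the timeout all three lists come back empty
    readable, writable, exceptional = select.select([listener], [], [], 0.05)
    assert readable == [] and writable == [] and exceptional == []
    listener.close()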
s = original_socket.socket() s.setblocking(0) s.bind(('127.0.0.1', 0)) s.listen(5) gt = eventlet.spawn(select.select, [s], [s], [s]) eventlet.sleep(0.01) with eventlet.Timeout(0.5) as t: with tests.assert_raises(hubs.IOClosed): hubs.get_hub().mark_as_reopened(s.fileno()) gt.wait() t.cancel() eventlet-0.30.2/tests/greendns_test.py0000644000076500000240000011140714006212666020443 0ustar temotostaff00000000000000# coding: utf-8 """Tests for the eventlet.support.greendns module""" import os import socket import tempfile import time from dns.resolver import NoAnswer, Answer, Resolver from eventlet.support import greendns from eventlet.support.greendns import dns import tests import tests.mock def _make_host_resolver(): """Returns a HostResolver instance The hosts file will be empty but accessible as a py.path.local instance using the ``hosts`` attribute. """ hosts = tempfile.NamedTemporaryFile() hr = greendns.HostsResolver(fname=hosts.name) hr.hosts = hosts hr._last_stat = 0 return hr class TestHostsResolver(tests.LimitedTestCase): def test_default_fname(self): hr = greendns.HostsResolver() assert os.path.exists(hr.fname) def test_readlines_lines(self): hr = _make_host_resolver() hr.hosts.write(b'line0\n') hr.hosts.flush() assert list(hr._readlines()) == ['line0'] hr._last_stat = 0 hr.hosts.write(b'line1\n') hr.hosts.flush() assert list(hr._readlines()) == ['line0', 'line1'] # Test reading of varied newline styles hr._last_stat = 0 hr.hosts.seek(0) hr.hosts.truncate() hr.hosts.write(b'\naa\r\nbb\r cc \n\n\tdd ee') hr.hosts.flush() assert list(hr._readlines()) == ['aa', 'bb', 'cc', 'dd ee'] # Test comments, including inline comments hr._last_stat = 0 hr.hosts.seek(0) hr.hosts.truncate() hr.hosts.write(b'''\ # First couple lines # are comments. line1 #comment line2 # inline comment ''') hr.hosts.flush() assert list(hr._readlines()) == ['line1', 'line2'] def test_readlines_missing_file(self): hr = _make_host_resolver() hr.hosts.close() hr._last_stat = 0 assert list(hr._readlines()) == [] def test_load_no_contents(self): hr = _make_host_resolver() hr._load() assert not hr._v4 assert not hr._v6 assert not hr._aliases def test_load_v4_v6_cname_aliases(self): hr = _make_host_resolver() hr.hosts.write(b'1.2.3.4 v4.example.com v4\n' b'dead:beef::1 v6.example.com v6\n') hr.hosts.flush() hr._load() assert hr._v4 == {'v4.example.com': '1.2.3.4', 'v4': '1.2.3.4'} assert hr._v6 == {'v6.example.com': 'dead:beef::1', 'v6': 'dead:beef::1'} assert hr._aliases == {'v4': 'v4.example.com', 'v6': 'v6.example.com'} def test_load_v6_link_local(self): hr = _make_host_resolver() hr.hosts.write(b'fe80:: foo\n' b'fe80:dead:beef::1 bar\n') hr.hosts.flush() hr._load() assert not hr._v4 assert not hr._v6 def test_query_A(self): hr = _make_host_resolver() hr._v4 = {'v4.example.com': '1.2.3.4'} ans = hr.query('v4.example.com') assert ans[0].address == '1.2.3.4' def test_query_ans_types(self): # This assumes test_query_A above succeeds hr = _make_host_resolver() hr._v4 = {'v4.example.com': '1.2.3.4'} hr._last_stat = time.time() ans = hr.query('v4.example.com') assert isinstance(ans, greendns.dns.resolver.Answer) assert ans.response is None assert ans.qname == dns.name.from_text('v4.example.com') assert ans.rdtype == dns.rdatatype.A assert ans.rdclass == dns.rdataclass.IN assert ans.canonical_name == dns.name.from_text('v4.example.com') assert ans.expiration assert isinstance(ans.rrset, dns.rrset.RRset) assert ans.rrset.rdtype == dns.rdatatype.A assert ans.rrset.rdclass == dns.rdataclass.IN ttl = greendns.HOSTS_TTL assert ttl - 
1 <= ans.rrset.ttl <= ttl + 1 rr = ans.rrset[0] assert isinstance(rr, greendns.dns.rdtypes.IN.A.A) assert rr.rdtype == dns.rdatatype.A assert rr.rdclass == dns.rdataclass.IN assert rr.address == '1.2.3.4' def test_query_AAAA(self): hr = _make_host_resolver() hr._v6 = {'v6.example.com': 'dead:beef::1'} ans = hr.query('v6.example.com', dns.rdatatype.AAAA) assert ans[0].address == 'dead:beef::1' def test_query_unknown_raises(self): hr = _make_host_resolver() with tests.assert_raises(greendns.dns.resolver.NoAnswer): hr.query('example.com') def test_query_unknown_no_raise(self): hr = _make_host_resolver() ans = hr.query('example.com', raise_on_no_answer=False) assert isinstance(ans, greendns.dns.resolver.Answer) assert ans.response is None assert ans.qname == dns.name.from_text('example.com') assert ans.rdtype == dns.rdatatype.A assert ans.rdclass == dns.rdataclass.IN assert ans.canonical_name == dns.name.from_text('example.com') assert ans.expiration assert isinstance(ans.rrset, greendns.dns.rrset.RRset) assert ans.rrset.rdtype == dns.rdatatype.A assert ans.rrset.rdclass == dns.rdataclass.IN assert len(ans.rrset) == 0 def test_query_CNAME(self): hr = _make_host_resolver() hr._aliases = {'host': 'host.example.com'} ans = hr.query('host', dns.rdatatype.CNAME) assert ans[0].target == dns.name.from_text('host.example.com') assert str(ans[0].target) == 'host.example.com.' def test_query_unknown_type(self): hr = _make_host_resolver() with tests.assert_raises(greendns.dns.resolver.NoAnswer): hr.query('example.com', dns.rdatatype.MX) def test_getaliases(self): hr = _make_host_resolver() hr._aliases = {'host': 'host.example.com', 'localhost': 'host.example.com'} res = set(hr.getaliases('host')) assert res == set(['host.example.com', 'localhost']) def test_getaliases_unknown(self): hr = _make_host_resolver() assert hr.getaliases('host.example.com') == [] def test_getaliases_fqdn(self): hr = _make_host_resolver() hr._aliases = {'host': 'host.example.com'} res = set(hr.getaliases('host.example.com')) assert res == set(['host']) def test_hosts_case_insensitive(self): name = 'example.com' hr = _make_host_resolver() hr.hosts.write(b'1.2.3.4 ExAmPlE.CoM\n') hr.hosts.flush() hr._load() ans = hr.query(name) rr = ans.rrset[0] assert isinstance(rr, greendns.dns.rdtypes.IN.A.A) assert rr.rdtype == dns.rdatatype.A assert rr.rdclass == dns.rdataclass.IN assert rr.address == '1.2.3.4' def _make_mock_base_resolver(): """A mocked base resolver class""" class RR(object): pass class Resolver(object): aliases = ['cname.example.com'] raises = None rr = RR() rr6 = RR() def query(self, *args, **kwargs): self.args = args self.kwargs = kwargs if self.raises: raise self.raises() if hasattr(self, 'rrset'): rrset = self.rrset else: if self.rr6 and self.args[1] == dns.rdatatype.AAAA: rrset = [self.rr6] else: rrset = [self.rr] return greendns.HostsAnswer('foo', 1, 1, rrset, False) def getaliases(self, *args, **kwargs): return self.aliases return Resolver class TestUdp(tests.LimitedTestCase): def setUp(self): # Store this so we can reuse it for each test self.query = greendns.dns.message.Message() self.query.flags = greendns.dns.flags.QR self.query_wire = self.query.to_wire() super(TestUdp, self).setUp() def test_udp_ipv4(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('127.0.0.1', 53))): greendns.udp(self.query, '127.0.0.1') def test_udp_ipv4_timeout(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with 
tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '127.0.0.1', timeout=0.1) def test_udp_ipv4_wrong_addr_ignore(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '127.0.0.1', timeout=0.1, ignore_unexpected=True) def test_udp_ipv4_wrong_addr(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('127.0.0.2', 53))): with tests.assert_raises(dns.query.UnexpectedSource): greendns.udp(self.query, '127.0.0.1') def test_udp_ipv6(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('::1', 53, 0, 0))): greendns.udp(self.query, '::1') def test_udp_ipv6_timeout(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '::1', timeout=0.1) def test_udp_ipv6_addr_zeroes(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('0:00:0000::1', 53, 0, 0))): greendns.udp(self.query, '::1') def test_udp_ipv6_wrong_addr_ignore(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', side_effect=socket.timeout): with tests.assert_raises(dns.exception.Timeout): greendns.udp(self.query, '::1', timeout=0.1, ignore_unexpected=True) def test_udp_ipv6_wrong_addr(self): with tests.mock.patch('eventlet.support.greendns.socket.socket.recvfrom', return_value=(self.query_wire, ('ffff:0000::1', 53, 0, 0))): with tests.assert_raises(dns.query.UnexpectedSource): greendns.udp(self.query, '::1') class TestProxyResolver(tests.LimitedTestCase): def test_clear(self): rp = greendns.ResolverProxy() resolver = rp._resolver rp.clear() assert rp._resolver != resolver def _make_mock_hostsresolver(self): """A mocked HostsResolver""" base_resolver = _make_mock_base_resolver() base_resolver.rr.address = '1.2.3.4' return base_resolver() def _make_mock_resolver(self): """A mocked Resolver""" base_resolver = _make_mock_base_resolver() base_resolver.rr.address = '5.6.7.8' return base_resolver() def test_hosts(self): hostsres = self._make_mock_hostsresolver() rp = greendns.ResolverProxy(hostsres) ans = rp.query('host.example.com') assert ans[0].address == '1.2.3.4' def test_hosts_noanswer(self): hostsres = self._make_mock_hostsresolver() res = self._make_mock_resolver() rp = greendns.ResolverProxy(hostsres) rp._resolver = res hostsres.raises = greendns.dns.resolver.NoAnswer ans = rp.query('host.example.com') assert ans[0].address == '5.6.7.8' def test_resolver(self): res = self._make_mock_resolver() rp = greendns.ResolverProxy() rp._resolver = res ans = rp.query('host.example.com') assert ans[0].address == '5.6.7.8' def test_noanswer(self): res = self._make_mock_resolver() rp = greendns.ResolverProxy() rp._resolver = res res.raises = greendns.dns.resolver.NoAnswer with tests.assert_raises(greendns.dns.resolver.NoAnswer): rp.query('host.example.com') def test_nxdomain(self): res = self._make_mock_resolver() rp = greendns.ResolverProxy() rp._resolver = res res.raises = greendns.dns.resolver.NXDOMAIN with tests.assert_raises(greendns.dns.resolver.NXDOMAIN): rp.query('host.example.com') def test_noanswer_hosts(self): hostsres = self._make_mock_hostsresolver() res = self._make_mock_resolver() rp = greendns.ResolverProxy(hostsres) rp._resolver = res hostsres.raises = 
greendns.dns.resolver.NoAnswer res.raises = greendns.dns.resolver.NoAnswer with tests.assert_raises(greendns.dns.resolver.NoAnswer): rp.query('host.example.com') def _make_mock_resolver_aliases(self): class RR(object): target = 'host.example.com' class Resolver(object): call_count = 0 exc_type = greendns.dns.resolver.NoAnswer def query(self, *args, **kwargs): self.args = args self.kwargs = kwargs self.call_count += 1 if self.call_count < 2: return greendns.HostsAnswer(args[0], 1, 5, [RR()], False) else: raise self.exc_type() return Resolver() def test_getaliases(self): aliases_res = self._make_mock_resolver_aliases() rp = greendns.ResolverProxy() rp._resolver = aliases_res aliases = set(rp.getaliases('alias.example.com')) assert aliases == set(['host.example.com']) def test_getaliases_fqdn(self): aliases_res = self._make_mock_resolver_aliases() rp = greendns.ResolverProxy() rp._resolver = aliases_res rp._resolver.call_count = 1 assert rp.getaliases('host.example.com') == [] def test_getaliases_nxdomain(self): aliases_res = self._make_mock_resolver_aliases() rp = greendns.ResolverProxy() rp._resolver = aliases_res rp._resolver.call_count = 1 rp._resolver.exc_type = greendns.dns.resolver.NXDOMAIN assert rp.getaliases('host.example.com') == [] class TestResolve(tests.LimitedTestCase): def setUp(self): base_resolver = _make_mock_base_resolver() base_resolver.rr.address = '1.2.3.4' self._old_resolver = greendns.resolver greendns.resolver = base_resolver() def tearDown(self): greendns.resolver = self._old_resolver def test_A(self): ans = greendns.resolve('host.example.com', socket.AF_INET) assert ans[0].address == '1.2.3.4' assert greendns.resolver.args == ('host.example.com', dns.rdatatype.A) def test_AAAA(self): greendns.resolver.rr6.address = 'dead:beef::1' ans = greendns.resolve('host.example.com', socket.AF_INET6) assert ans[0].address == 'dead:beef::1' assert greendns.resolver.args == ('host.example.com', dns.rdatatype.AAAA) def test_unknown_rdtype(self): with tests.assert_raises(socket.gaierror): greendns.resolve('host.example.com', socket.AF_INET6 + 1) def test_timeout(self): greendns.resolver.raises = greendns.dns.exception.Timeout with tests.assert_raises(socket.gaierror): greendns.resolve('host.example.com') def test_exc(self): greendns.resolver.raises = greendns.dns.exception.DNSException with tests.assert_raises(socket.gaierror): greendns.resolve('host.example.com') def test_noraise_noanswer(self): greendns.resolver.rrset = None ans = greendns.resolve('example.com', raises=False) assert not ans.rrset def test_noraise_nxdomain(self): greendns.resolver.raises = greendns.dns.resolver.NXDOMAIN ans = greendns.resolve('example.com', raises=False) assert not ans.rrset class TestResolveCname(tests.LimitedTestCase): def setUp(self): base_resolver = _make_mock_base_resolver() base_resolver.rr.target = 'cname.example.com' self._old_resolver = greendns.resolver greendns.resolver = base_resolver() def tearDown(self): greendns.resolver = self._old_resolver def test_success(self): cname = greendns.resolve_cname('alias.example.com') assert cname == 'cname.example.com' def test_timeout(self): greendns.resolver.raises = greendns.dns.exception.Timeout with tests.assert_raises(socket.gaierror): greendns.resolve_cname('alias.example.com') def test_nodata(self): greendns.resolver.raises = greendns.dns.exception.DNSException with tests.assert_raises(socket.gaierror): greendns.resolve_cname('alias.example.com') def test_no_answer(self): greendns.resolver.raises = greendns.dns.resolver.NoAnswer assert 
greendns.resolve_cname('host.example.com') == 'host.example.com' def _make_mock_resolve(): """A stubbed out resolve function This monkeypatches the greendns.resolve() function with a mock. You must give it answers by calling .add(). """ class MockAnswer(list): pass class MockResolve(object): def __init__(self): self.answers = {} def __call__(self, name, family=socket.AF_INET, raises=True, _proxy=None, use_network=True): qname = dns.name.from_text(name) try: rrset = self.answers[name][family] except KeyError: if raises: raise greendns.dns.resolver.NoAnswer() rrset = dns.rrset.RRset(qname, 1, 1) ans = MockAnswer() ans.qname = qname ans.rrset = rrset ans.extend(rrset.items) return ans def add(self, name, addr): """Add an address to a name and family""" try: rdata = dns.rdtypes.IN.A.A(dns.rdataclass.IN, dns.rdatatype.A, addr) family = socket.AF_INET except (socket.error, dns.exception.SyntaxError): rdata = dns.rdtypes.IN.AAAA.AAAA(dns.rdataclass.IN, dns.rdatatype.AAAA, addr) family = socket.AF_INET6 family_dict = self.answers.setdefault(name, {}) rrset = family_dict.get(family) if not rrset: family_dict[family] = rrset = dns.rrset.RRset( dns.name.from_text(name), rdata.rdclass, rdata.rdtype) rrset.add(rdata) resolve = MockResolve() return resolve class TestGetaddrinfo(tests.LimitedTestCase): def _make_mock_resolve_cname(self): """A stubbed out cname function""" class ResolveCname(object): qname = None cname = 'cname.example.com' def __call__(self, host): self.qname = host return self.cname resolve_cname = ResolveCname() return resolve_cname def setUp(self): self._old_resolve = greendns.resolve self._old_resolve_cname = greendns.resolve_cname self._old_orig_getaddrinfo = greendns.socket.getaddrinfo def tearDown(self): greendns.resolve = self._old_resolve greendns.resolve_cname = self._old_resolve_cname greendns.socket.getaddrinfo = self._old_orig_getaddrinfo def test_getaddrinfo(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '127.0.0.2') greendns.resolve.add('example.com', '::1') res = greendns.getaddrinfo('example.com', 'domain') addr = ('127.0.0.2', 53) tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) addr = ('::1', 53, 0, 0) tcp6 = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp6 = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) filt_res = [ai[:3] + (ai[4],) for ai in res] assert tcp in filt_res assert udp in filt_res assert tcp6 in filt_res assert udp6 in filt_res def test_getaddrinfo_idn(self): greendns.resolve = _make_mock_resolve() idn_name = u'евентлет.com' greendns.resolve.add(idn_name.encode('idna').decode('ascii'), '127.0.0.2') res = greendns.getaddrinfo(idn_name, 'domain') addr = ('127.0.0.2', 53) tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) filt_res = [ai[:3] + (ai[4],) for ai in res] assert tcp in filt_res assert udp in filt_res def test_getaddrinfo_inet(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '127.0.0.2') res = greendns.getaddrinfo('example.com', 'domain', socket.AF_INET) addr = ('127.0.0.2', 53) tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) assert tcp in [ai[:3] + (ai[4],) for ai in res] assert udp in [ai[:3] + (ai[4],) for ai in res] def test_getaddrinfo_inet6(self): greendns.resolve = 
_make_mock_resolve() greendns.resolve.add('example.com', '::1') res = greendns.getaddrinfo('example.com', 'domain', socket.AF_INET6) addr = ('::1', 53, 0, 0) tcp = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) assert tcp in [ai[:3] + (ai[4],) for ai in res] assert udp in [ai[:3] + (ai[4],) for ai in res] def test_getaddrinfo_only_a_ans(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '1.2.3.4') res = greendns.getaddrinfo('example.com', 0) addr = [('1.2.3.4', 0)] * len(res) assert addr == [ai[-1] for ai in res] def test_getaddrinfo_only_aaaa_ans(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', 'dead:beef::1') res = greendns.getaddrinfo('example.com', 0) addr = [('dead:beef::1', 0, 0, 0)] * len(res) assert addr == [ai[-1] for ai in res] def test_getaddrinfo_hosts_only_ans_with_timeout(self): def clear_raises(res_self): res_self.raises = None return greendns.dns.resolver.NoAnswer() hostsres = _make_mock_base_resolver() hostsres.raises = clear_raises hostsres.rr.address = '1.2.3.4' greendns.resolver = greendns.ResolverProxy(hostsres()) res = _make_mock_base_resolver() res.raises = greendns.dns.exception.Timeout greendns.resolver._resolver = res() result = greendns.getaddrinfo('example.com', 0, 0) addr = [('1.2.3.4', 0)] * len(result) assert addr == [ai[-1] for ai in result] def test_getaddrinfo_hosts_only_ans_with_error(self): def clear_raises(res_self): res_self.raises = None return greendns.dns.resolver.NoAnswer() hostsres = _make_mock_base_resolver() hostsres.raises = clear_raises hostsres.rr.address = '1.2.3.4' greendns.resolver = greendns.ResolverProxy(hostsres()) res = _make_mock_base_resolver() res.raises = greendns.dns.exception.DNSException greendns.resolver._resolver = res() result = greendns.getaddrinfo('example.com', 0, 0) addr = [('1.2.3.4', 0)] * len(result) assert addr == [ai[-1] for ai in result] def test_getaddrinfo_hosts_only_timeout(self): hostsres = _make_mock_base_resolver() hostsres.raises = greendns.dns.resolver.NoAnswer greendns.resolver = greendns.ResolverProxy(hostsres()) res = _make_mock_base_resolver() res.raises = greendns.dns.exception.Timeout greendns.resolver._resolver = res() with tests.assert_raises(socket.gaierror): greendns.getaddrinfo('example.com', 0, 0) def test_getaddrinfo_hosts_only_dns_error(self): hostsres = _make_mock_base_resolver() hostsres.raises = greendns.dns.resolver.NoAnswer greendns.resolver = greendns.ResolverProxy(hostsres()) res = _make_mock_base_resolver() res.raises = greendns.dns.exception.DNSException greendns.resolver._resolver = res() with tests.assert_raises(socket.gaierror): greendns.getaddrinfo('example.com', 0, 0) def test_canonname(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('host.example.com', '1.2.3.4') greendns.resolve_cname = self._make_mock_resolve_cname() res = greendns.getaddrinfo('host.example.com', 0, 0, 0, 0, socket.AI_CANONNAME) assert res[0][3] == 'cname.example.com' def test_host_none(self): res = greendns.getaddrinfo(None, 80) for addr in set(ai[-1] for ai in res): assert addr in [('127.0.0.1', 80), ('::1', 80, 0, 0)] def test_host_none_passive(self): res = greendns.getaddrinfo(None, 80, 0, 0, 0, socket.AI_PASSIVE) for addr in set(ai[-1] for ai in res): assert addr in [('0.0.0.0', 80), ('::', 80, 0, 0)] def test_v4mapped(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '1.2.3.4') res = 
greendns.getaddrinfo('example.com', 80, socket.AF_INET6, 0, 0, socket.AI_V4MAPPED) addrs = set(ai[-1] for ai in res) assert addrs == set([('::ffff:1.2.3.4', 80, 0, 0)]) def test_v4mapped_all(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '1.2.3.4') greendns.resolve.add('example.com', 'dead:beef::1') res = greendns.getaddrinfo('example.com', 80, socket.AF_INET6, 0, 0, socket.AI_V4MAPPED | socket.AI_ALL) addrs = set(ai[-1] for ai in res) for addr in addrs: assert addr in [('::ffff:1.2.3.4', 80, 0, 0), ('dead:beef::1', 80, 0, 0)] def test_numericserv(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '1.2.3.4') with tests.assert_raises(socket.gaierror): greendns.getaddrinfo('example.com', 'www', 0, 0, 0, socket.AI_NUMERICSERV) def test_numerichost(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '1.2.3.4') with tests.assert_raises(socket.gaierror): greendns.getaddrinfo('example.com', 80, 0, 0, 0, socket.AI_NUMERICHOST) def test_noport(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '1.2.3.4') ai = greendns.getaddrinfo('example.com', None) assert ai[0][-1][1] == 0 def test_AI_ADDRCONFIG(self): # When the user sets AI_ADDRCONFIG but only has an IPv4 # address configured we will iterate over the results, but the # call for the IPv6 address will fail rather than return an # empty list. In that case we should catch the exception and # only return the ones which worked. def getaddrinfo(addr, port, family, socktype, proto, aiflags): if addr == '127.0.0.1': return [(socket.AF_INET, 1, 0, '', ('127.0.0.1', 0))] elif addr == '::1' and aiflags & socket.AI_ADDRCONFIG: raise socket.error(socket.EAI_ADDRFAMILY, 'Address family for hostname not supported') elif addr == '::1' and not aiflags & socket.AI_ADDRCONFIG: return [(socket.AF_INET6, 1, 0, '', ('::1', 0, 0, 0))] greendns.socket.getaddrinfo = getaddrinfo greendns.resolve = _make_mock_resolve() greendns.resolve.add('localhost', '127.0.0.1') greendns.resolve.add('localhost', '::1') res = greendns.getaddrinfo('localhost', None, 0, 0, 0, socket.AI_ADDRCONFIG) assert res == [(socket.AF_INET, 1, 0, '', ('127.0.0.1', 0))] def test_AI_ADDRCONFIG_noaddr(self): # If AI_ADDRCONFIG is used but there is no address we need to # get an exception, not an empty list. 
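# (Descriptive note for the stub below.) It stands in for the underlying
# socket.getaddrinfo and fails for every address family, simulating a host
# with no usable addresses configured; greendns.getaddrinfo() is then
# expected to surface the EAI_ADDRFAMILY error rather than return [].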
def getaddrinfo(addr, port, family, socktype, proto, aiflags): raise socket.error(socket.EAI_ADDRFAMILY, 'Address family for hostname not supported') greendns.socket.getaddrinfo = getaddrinfo greendns.resolve = _make_mock_resolve() try: greendns.getaddrinfo('::1', None, 0, 0, 0, socket.AI_ADDRCONFIG) except socket.error as e: assert e.errno == socket.EAI_ADDRFAMILY class TestIsIpAddr(tests.LimitedTestCase): def test_isv4(self): assert greendns.is_ipv4_addr('1.2.3.4') def test_isv4_false(self): assert not greendns.is_ipv4_addr('260.0.0.0') def test_isv6(self): assert greendns.is_ipv6_addr('dead:beef::1') def test_isv6_invalid(self): assert not greendns.is_ipv6_addr('foobar::1') def test_v4(self): assert greendns.is_ip_addr('1.2.3.4') def test_v4_illegal(self): assert not greendns.is_ip_addr('300.0.0.1') def test_v6_addr(self): assert greendns.is_ip_addr('::1') def test_isv4_none(self): assert not greendns.is_ipv4_addr(None) def test_isv6_none(self): assert not greendns.is_ipv6_addr(None) def test_none(self): assert not greendns.is_ip_addr(None) class TestGethostbyname(tests.LimitedTestCase): def setUp(self): self._old_resolve = greendns.resolve greendns.resolve = _make_mock_resolve() def tearDown(self): greendns.resolve = self._old_resolve def test_ipaddr(self): assert greendns.gethostbyname('1.2.3.4') == '1.2.3.4' def test_name(self): greendns.resolve.add('host.example.com', '1.2.3.4') assert greendns.gethostbyname('host.example.com') == '1.2.3.4' class TestGetaliases(tests.LimitedTestCase): def _make_mock_resolver(self): base_resolver = _make_mock_base_resolver() resolver = base_resolver() resolver.aliases = ['cname.example.com'] return resolver def setUp(self): self._old_resolver = greendns.resolver greendns.resolver = self._make_mock_resolver() def tearDown(self): greendns.resolver = self._old_resolver def test_getaliases(self): assert greendns.getaliases('host.example.com') == ['cname.example.com'] class TestGethostbyname_ex(tests.LimitedTestCase): def _make_mock_getaliases(self): class GetAliases(object): aliases = ['cname.example.com'] def __call__(self, *args, **kwargs): return self.aliases getaliases = GetAliases() return getaliases def setUp(self): self._old_resolve = greendns.resolve greendns.resolve = _make_mock_resolve() self._old_getaliases = greendns.getaliases def tearDown(self): greendns.resolve = self._old_resolve greendns.getaliases = self._old_getaliases def test_ipaddr(self): res = greendns.gethostbyname_ex('1.2.3.4') assert res == ('1.2.3.4', [], ['1.2.3.4']) def test_name(self): greendns.resolve.add('host.example.com', '1.2.3.4') greendns.getaliases = self._make_mock_getaliases() greendns.getaliases.aliases = [] res = greendns.gethostbyname_ex('host.example.com') assert res == ('host.example.com', [], ['1.2.3.4']) def test_multiple_addrs(self): greendns.resolve.add('host.example.com', '1.2.3.4') greendns.resolve.add('host.example.com', '1.2.3.5') greendns.getaliases = self._make_mock_getaliases() greendns.getaliases.aliases = [] res = greendns.gethostbyname_ex('host.example.com') assert res == ('host.example.com', [], ['1.2.3.4', '1.2.3.5']) class TinyDNSTests(tests.LimitedTestCase): def test_raise_dns_tcp(self): # https://github.com/eventlet/eventlet/issues/499 # None means we don't want the server to find the IP with tests.dns_tcp_server(None) as dnsaddr: resolver = Resolver() resolver.nameservers = [dnsaddr[0]] resolver.nameserver_ports[dnsaddr[0]] = dnsaddr[1] with self.assertRaises(NoAnswer): resolver.query('host.example.com', 'a', tcp=True) def 
test_noraise_dns_tcp(self): # https://github.com/eventlet/eventlet/issues/499 expected_ip = "192.168.1.1" with tests.dns_tcp_server(expected_ip) as dnsaddr: resolver = Resolver() resolver.nameservers = [dnsaddr[0]] resolver.nameserver_ports[dnsaddr[0]] = dnsaddr[1] response = resolver.query('host.example.com', 'a', tcp=True) self.assertIsInstance(response, Answer) self.assertEqual(response.rrset.items[0].address, expected_ip) def test_reverse_name(): tests.run_isolated('greendns_from_address_203.py') def test_proxy_resolve_unqualified(): # https://github.com/eventlet/eventlet/issues/363 rp = greendns.ResolverProxy(filename=None) rp._resolver.search.append(dns.name.from_text('example.com')) with tests.mock.patch('dns.resolver.Resolver.query', side_effect=dns.resolver.NoAnswer) as m: try: rp.query('machine') assert False, 'Expected NoAnswer exception' except dns.resolver.NoAnswer: pass assert any(call[0][0] == dns.name.from_text('machine') for call in m.call_args_list) assert any(call[0][0] == dns.name.from_text('machine.') for call in m.call_args_list) def test_hosts_priority(): name = 'example.com' addr_from_ns = '1.0.2.0' hr = _make_host_resolver() rp = greendns.ResolverProxy(hosts_resolver=hr, filename=None) base = _make_mock_base_resolver() base.rr.address = addr_from_ns rp._resolver = base() # Default behavior rrns = greendns.resolve(name, _proxy=rp).rrset[0] assert rrns.address == addr_from_ns # Hosts result must shadow that from nameservers hr.hosts.write(b'1.2.3.4 example.com\ndead:beef::1 example.com\n') hr.hosts.flush() hr._load() rrs4 = greendns.resolve(name, family=socket.AF_INET, _proxy=rp).rrset assert len(rrs4) == 1 assert rrs4[0].address == '1.2.3.4', rrs4[0].address rrs6 = greendns.resolve(name, family=socket.AF_INET6, _proxy=rp).rrset assert len(rrs6) == 1 assert rrs6[0].address == 'dead:beef::1', rrs6[0].address def test_hosts_no_network(): name = 'example.com' addr_from_ns = '1.0.2.0' addr6_from_ns = 'dead:beef::1' hr = _make_host_resolver() rp = greendns.ResolverProxy(hosts_resolver=hr, filename=None) base = _make_mock_base_resolver() base.rr.address = addr_from_ns base.rr6.address = addr6_from_ns rp._resolver = base() with tests.mock.patch.object(greendns, 'resolver', new_callable=tests.mock.PropertyMock(return_value=rp)): res = greendns.getaddrinfo('example.com', 'domain', socket.AF_UNSPEC) # Default behavior addr = (addr_from_ns, 53) tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) addr = (addr6_from_ns, 53, 0, 0) tcp6 = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp6 = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) filt_res = [ai[:3] + (ai[4],) for ai in res] assert tcp in filt_res assert udp in filt_res assert tcp6 in filt_res assert udp6 in filt_res # Hosts result must shadow that from nameservers hr = _make_host_resolver() hr.hosts.write(b'1.2.3.4 example.com') hr.hosts.flush() hr._load() greendns.resolver._hosts = hr res = greendns.getaddrinfo('example.com', 'domain', socket.AF_UNSPEC) filt_res = [ai[:3] + (ai[4],) for ai in res] # Make sure that only IPv4 entry from hosts is present. 
assert tcp not in filt_res assert udp not in filt_res assert tcp6 not in filt_res assert udp6 not in filt_res addr = ('1.2.3.4', 53) tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) assert tcp in filt_res assert udp in filt_res def test_import_rdtypes_then_eventlet(): # https://github.com/eventlet/eventlet/issues/479 tests.run_isolated('greendns_import_rdtypes_then_eventlet.py') eventlet-0.30.2/tests/greenio_test.py0000644000076500000240000010276414006212666020274 0ustar temotostaff00000000000000import array import errno import fcntl import gc from io import DEFAULT_BUFFER_SIZE import os import shutil import socket as _orig_sock import sys import tempfile from nose.tools import eq_ import eventlet from eventlet import event, greenio, debug from eventlet.hubs import get_hub from eventlet.green import select, socket, time, ssl from eventlet.support import get_errno import six import tests import tests.mock as mock def bufsized(sock, size=1): """ Resize both send and receive buffers on a socket. Useful for testing trampoline. Returns the socket. >>> import socket >>> sock = bufsized(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) """ sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size) sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, size) return sock def expect_socket_timeout(function, *args): try: function(*args) raise AssertionError("socket.timeout not raised") except socket.timeout as e: assert hasattr(e, 'args') eq_(e.args[0], 'timed out') def min_buf_size(): """Return the minimum buffer size that the platform supports.""" test_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) test_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1) return test_sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) def using_epoll_hub(_f): try: return 'epolls' in type(get_hub()).__module__ except Exception: return False def using_kqueue_hub(_f): try: return 'kqueue' in type(get_hub()).__module__ except Exception: return False class TestGreenSocket(tests.LimitedTestCase): def assertWriteToClosedFileRaises(self, fd): if sys.version_info[0] < 3: # 2.x socket._fileobjects are odd: writes don't check # whether the socket is closed or not, and you get an # AttributeError during flush if it is closed fd.write(b'a') self.assertRaises(Exception, fd.flush) else: # 3.x io write to closed file-like object raises ValueError self.assertRaises(ValueError, fd.write, b'a') def test_connect_timeout(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(0.1) gs = greenio.GreenSocket(s) try: expect_socket_timeout(gs.connect, ('192.0.2.1', 80)) except socket.error as e: # unreachable is also a valid outcome if not get_errno(e) in (errno.EHOSTUNREACH, errno.ENETUNREACH): raise def test_accept_timeout(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) s.listen(50) s.settimeout(0.1) gs = greenio.GreenSocket(s) expect_socket_timeout(gs.accept) def test_connect_ex_timeout(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(0.1) gs = greenio.GreenSocket(s) e = gs.connect_ex(('192.0.2.1', 80)) if e not in (errno.EHOSTUNREACH, errno.ENETUNREACH): self.assertEqual(e, errno.EAGAIN) def test_recv_timeout(self): listener = greenio.GreenSocket(socket.socket()) listener.bind(('', 0)) listener.listen(50) evt = event.Event() def server(): # accept the connection in another greenlet sock, addr = listener.accept() evt.wait() gt = eventlet.spawn(server) addr = 
listener.getsockname() client = greenio.GreenSocket(socket.socket()) client.settimeout(0.1) client.connect(addr) expect_socket_timeout(client.recv, 0) expect_socket_timeout(client.recv, 8192) evt.send() gt.wait() def test_recvfrom_timeout(self): gs = greenio.GreenSocket( socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) gs.settimeout(.1) gs.bind(('', 0)) expect_socket_timeout(gs.recvfrom, 0) expect_socket_timeout(gs.recvfrom, 8192) def test_recvfrom_into_timeout(self): buf = array.array('B') gs = greenio.GreenSocket( socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) gs.settimeout(.1) gs.bind(('', 0)) expect_socket_timeout(gs.recvfrom_into, buf) def test_recv_into_timeout(self): buf = array.array('B') listener = greenio.GreenSocket(socket.socket()) listener.bind(('', 0)) listener.listen(50) evt = event.Event() def server(): # accept the connection in another greenlet sock, addr = listener.accept() evt.wait() gt = eventlet.spawn(server) addr = listener.getsockname() client = greenio.GreenSocket(socket.socket()) client.settimeout(0.1) client.connect(addr) expect_socket_timeout(client.recv_into, buf) evt.send() gt.wait() def test_send_timeout(self): self.reset_timeout(2) listener = bufsized(eventlet.listen(('', 0))) evt = event.Event() def server(): # accept the connection in another greenlet sock, addr = listener.accept() sock = bufsized(sock) evt.wait() gt = eventlet.spawn(server) addr = listener.getsockname() client = bufsized(greenio.GreenSocket(socket.socket())) client.connect(addr) client.settimeout(0.00001) msg = b"A" * 100000 # large enough number to overwhelm most buffers # want to exceed the size of the OS buffer so it'll block in a # single send def send(): for x in range(10): client.send(msg) expect_socket_timeout(send) evt.send() gt.wait() def test_sendall_timeout(self): listener = greenio.GreenSocket(socket.socket()) listener.bind(('', 0)) listener.listen(50) evt = event.Event() def server(): # accept the connection in another greenlet sock, addr = listener.accept() evt.wait() gt = eventlet.spawn(server) addr = listener.getsockname() client = greenio.GreenSocket(socket.socket()) client.settimeout(0.1) client.connect(addr) # want to exceed the size of the OS buffer so it'll block msg = b"A" * (8 << 20) expect_socket_timeout(client.sendall, msg) evt.send() gt.wait() def test_close_with_makefile(self): def accept_close_early(listener): # verify that the makefile and the socket are truly independent # by closing the socket prior to using the made file try: conn, addr = listener.accept() fd = conn.makefile('wb') conn.close() fd.write(b'hello\n') fd.close() self.assertWriteToClosedFileRaises(fd) self.assertRaises(socket.error, conn.send, b'b') finally: listener.close() def accept_close_late(listener): # verify that the makefile and the socket are truly independent # by closing the made file and then sending a character try: conn, addr = listener.accept() fd = conn.makefile('wb') fd.write(b'hello') fd.close() conn.send(b'\n') conn.close() self.assertWriteToClosedFileRaises(fd) self.assertRaises(socket.error, conn.send, b'b') finally: listener.close() def did_it_work(server): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', server.getsockname()[1])) fd = client.makefile('rb') client.close() assert fd.readline() == b'hello\n' assert fd.read() == b'' fd.close() server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind(('0.0.0.0', 0)) server.listen(50) killer = 
eventlet.spawn(accept_close_early, server) did_it_work(server) killer.wait() server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind(('0.0.0.0', 0)) server.listen(50) killer = eventlet.spawn(accept_close_late, server) did_it_work(server) killer.wait() def test_del_closes_socket(self): def accept_once(listener): # delete/overwrite the original conn # object, only keeping the file object around # closing the file object should close everything try: conn, addr = listener.accept() conn = conn.makefile('wb') conn.write(b'hello\n') conn.close() gc.collect() self.assertWriteToClosedFileRaises(conn) finally: listener.close() server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind(('127.0.0.1', 0)) server.listen(50) killer = eventlet.spawn(accept_once, server) client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', server.getsockname()[1])) fd = client.makefile('rb') client.close() assert fd.read() == b'hello\n' assert fd.read() == b'' killer.wait() def test_blocking_accept_mark_as_reopened(self): evt_hub = get_hub() with mock.patch.object(evt_hub, "mark_as_reopened") as patched_mark_as_reopened: def connect_once(listener): # delete/overwrite the original conn # object, only keeping the file object around # closing the file object should close everything client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', listener.getsockname()[1])) client.close() server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind(('127.0.0.1', 0)) server.listen(50) acceptlet = eventlet.spawn(connect_once, server) conn, addr = server.accept() conn.sendall(b'hello\n') connfileno = conn.fileno() conn.close() assert patched_mark_as_reopened.called assert patched_mark_as_reopened.call_count == 3, "3 fds were opened, but the hub was " \ "only notified {call_count} times" \ .format(call_count=patched_mark_as_reopened.call_count) args, kwargs = patched_mark_as_reopened.call_args assert args == (connfileno,), "Expected mark_as_reopened to be called " \ "with {expected_fileno}, but it was called " \ "with {fileno}".format(expected_fileno=connfileno, fileno=args[0]) server.close() def test_nonblocking_accept_mark_as_reopened(self): evt_hub = get_hub() with mock.patch.object(evt_hub, "mark_as_reopened") as patched_mark_as_reopened: def connect_once(listener): # delete/overwrite the original conn # object, only keeping the file object around # closing the file object should close everything client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', listener.getsockname()[1])) client.close() server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind(('127.0.0.1', 0)) server.listen(50) server.setblocking(False) acceptlet = eventlet.spawn(connect_once, server) out = select.select([server], [], []) conn, addr = server.accept() conn.sendall(b'hello\n') connfileno = conn.fileno() conn.close() assert patched_mark_as_reopened.called assert patched_mark_as_reopened.call_count == 3, "3 fds were opened, but the hub was " \ "only notified {call_count} times" \ .format(call_count=patched_mark_as_reopened.call_count) args, kwargs = patched_mark_as_reopened.call_args assert args == (connfileno,), "Expected mark_as_reopened to be called " \ "with 
{expected_fileno}, but it was called " \ "with {fileno}".format(expected_fileno=connfileno, fileno=args[0]) server.close() def test_full_duplex(self): large_data = b'*' * 10 * min_buf_size() listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listener.bind(('127.0.0.1', 0)) listener.listen(50) bufsized(listener) def send_large(sock): sock.sendall(large_data) def read_large(sock): result = sock.recv(len(large_data)) while len(result) < len(large_data): result += sock.recv(len(large_data)) self.assertEqual(result, large_data) def server(): (sock, addr) = listener.accept() sock = bufsized(sock) send_large_coro = eventlet.spawn(send_large, sock) eventlet.sleep(0) result = sock.recv(10) expected = b'hello world' while len(result) < len(expected): result += sock.recv(10) self.assertEqual(result, expected) send_large_coro.wait() server_evt = eventlet.spawn(server) client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', listener.getsockname()[1])) bufsized(client) large_evt = eventlet.spawn(read_large, client) eventlet.sleep(0) client.sendall(b'hello world') server_evt.wait() large_evt.wait() client.close() def test_sendall(self): # test adapted from Marcus Cavanaugh's email # it may legitimately take a while, but will eventually complete self.timer.cancel() second_bytes = 10 def test_sendall_impl(many_bytes): bufsize = max(many_bytes // 15, 2) def sender(listener): (sock, addr) = listener.accept() sock = bufsized(sock, size=bufsize) sock.sendall(b'x' * many_bytes) sock.sendall(b'y' * second_bytes) listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listener.bind(("", 0)) listener.listen(50) sender_coro = eventlet.spawn(sender, listener) client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', listener.getsockname()[1])) bufsized(client, size=bufsize) total = 0 while total < many_bytes: data = client.recv(min(many_bytes - total, many_bytes // 10)) if not data: break total += len(data) total2 = 0 while total < second_bytes: data = client.recv(second_bytes) if not data: break total2 += len(data) sender_coro.wait() client.close() for how_many in (1000, 10000, 100000, 1000000): test_sendall_impl(how_many) def test_wrap_socket(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(('127.0.0.1', 0)) sock.listen(50) ssl.wrap_socket(sock) def test_timeout_and_final_write(self): # This test verifies that a write on a socket that we've # stopped listening for doesn't result in an incorrect switch server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind(('127.0.0.1', 0)) server.listen(50) bound_port = server.getsockname()[1] def sender(evt): s2, addr = server.accept() wrap_wfile = s2.makefile('wb') eventlet.sleep(0.02) wrap_wfile.write(b'hi') s2.close() evt.send(b'sent via event') evt = event.Event() eventlet.spawn(sender, evt) # lets the socket enter accept mode, which # is necessary for connect to succeed on windows eventlet.sleep(0) try: # try and get some data off of this pipe # but bail before any is sent eventlet.Timeout(0.01) client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', bound_port)) wrap_rfile = client.makefile() wrap_rfile.read(1) self.fail() except eventlet.Timeout: pass result = evt.wait() 
self.assertEqual(result, b'sent via event') server.close() client.close() @tests.skip_with_pyevent def test_raised_multiple_readers(self): debug.hub_prevent_multiple_readers(True) def handle(sock, addr): sock.recv(1) sock.sendall(b"a") raise eventlet.StopServe() listener = eventlet.listen(('127.0.0.1', 0)) eventlet.spawn(eventlet.serve, listener, handle) def reader(s): s.recv(1) s = eventlet.connect(('127.0.0.1', listener.getsockname()[1])) a = eventlet.spawn(reader, s) eventlet.sleep(0) self.assertRaises(RuntimeError, s.recv, 1) s.sendall(b'b') a.wait() @tests.skip_with_pyevent @tests.skip_if(using_epoll_hub) @tests.skip_if(using_kqueue_hub) def test_closure(self): def spam_to_me(address): sock = eventlet.connect(address) while True: try: sock.sendall(b'hello world') # Arbitrary delay to not use all available CPU, keeps the test # running quickly and reliably under a second time.sleep(0.001) except socket.error as e: if get_errno(e) == errno.EPIPE: return raise server = eventlet.listen(('127.0.0.1', 0)) sender = eventlet.spawn(spam_to_me, server.getsockname()) client, address = server.accept() server.close() def reader(): try: while True: data = client.recv(1024) assert data # Arbitrary delay to not use all available CPU, keeps the test # running quickly and reliably under a second time.sleep(0.001) except socket.error as e: # we get an EBADF because client is closed in the same process # (but a different greenthread) if get_errno(e) != errno.EBADF: raise def closer(): client.close() reader = eventlet.spawn(reader) eventlet.spawn_n(closer) reader.wait() sender.wait() def test_invalid_connection(self): # find an unused port by creating a socket then closing it listening_socket = eventlet.listen(('127.0.0.1', 0)) port = listening_socket.getsockname()[1] listening_socket.close() self.assertRaises(socket.error, eventlet.connect, ('127.0.0.1', port)) def test_zero_timeout_and_back(self): listen = eventlet.listen(('', 0)) # Keep reference to server side of socket server = eventlet.spawn(listen.accept) client = eventlet.connect(listen.getsockname()) client.settimeout(0.05) # Now must raise socket.timeout self.assertRaises(socket.timeout, client.recv, 1) client.settimeout(0) # Now must raise socket.error with EAGAIN try: client.recv(1) assert False except socket.error as e: assert get_errno(e) == errno.EAGAIN client.settimeout(0.05) # Now socket.timeout again self.assertRaises(socket.timeout, client.recv, 1) server.wait() def test_default_nonblocking(self): sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) flags = fcntl.fcntl(sock1.fd.fileno(), fcntl.F_GETFL) assert flags & os.O_NONBLOCK sock2 = socket.socket(sock1.fd) flags = fcntl.fcntl(sock2.fd.fileno(), fcntl.F_GETFL) assert flags & os.O_NONBLOCK def test_dup_nonblocking(self): sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) flags = fcntl.fcntl(sock1.fd.fileno(), fcntl.F_GETFL) assert flags & os.O_NONBLOCK sock2 = sock1.dup() flags = fcntl.fcntl(sock2.fd.fileno(), fcntl.F_GETFL) assert flags & os.O_NONBLOCK def test_skip_nonblocking(self): sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) fd = sock1.fd.fileno() flags = fcntl.fcntl(fd, fcntl.F_GETFL) # on SPARC, nonblocking mode sets O_NDELAY as well fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~(os.O_NONBLOCK | os.O_NDELAY)) flags = fcntl.fcntl(fd, fcntl.F_GETFL) assert flags & (os.O_NONBLOCK | os.O_NDELAY) == 0 sock2 = socket.socket(sock1.fd, set_nonblocking=False) flags = fcntl.fcntl(sock2.fd.fileno(), fcntl.F_GETFL) assert flags & (os.O_NONBLOCK | os.O_NDELAY) == 0 def 
test_sockopt_interface(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0 assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) == b'\000' sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) def test_socketpair_select(self): # https://github.com/eventlet/eventlet/pull/25 s1, s2 = socket.socketpair() assert select.select([], [s1], [], 0) == ([], [s1], []) assert select.select([], [s1], [], 0) == ([], [s1], []) def test_shutdown_safe(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.close() # should not raise greenio.shutdown_safe(sock) def test_datagram_socket_operations_work(self): receiver = greenio.GreenSocket(socket.AF_INET, socket.SOCK_DGRAM) receiver.bind(('127.0.0.1', 0)) address = receiver.getsockname() sender = greenio.GreenSocket(socket.AF_INET, socket.SOCK_DGRAM) # Two ways sendto can be called sender.sendto(b'first', address) sender.sendto(b'second', 0, address) sender_address = ('127.0.0.1', sender.getsockname()[1]) eq_(receiver.recvfrom(1024), (b'first', sender_address)) eq_(receiver.recvfrom(1024), (b'second', sender_address)) def test_get_fileno_of_a_socket_works(): class DummySocket(object): def fileno(self): return 123 assert select.get_fileno(DummySocket()) == 123 def test_get_fileno_of_an_int_works(): assert select.get_fileno(123) == 123 expected_get_fileno_type_error_message = ( 'Expected int or long, got <%s \'str\'>' % ('type' if six.PY2 else 'class')) def test_get_fileno_of_wrong_type_fails(): try: select.get_fileno('foo') except TypeError as ex: assert str(ex) == expected_get_fileno_type_error_message else: assert False, 'Expected TypeError not raised' def test_get_fileno_of_a_socket_with_fileno_returning_wrong_type_fails(): class DummySocket(object): def fileno(self): return 'foo' try: select.get_fileno(DummySocket()) except TypeError as ex: assert str(ex) == expected_get_fileno_type_error_message else: assert False, 'Expected TypeError not raised' class TestGreenPipe(tests.LimitedTestCase): @tests.skip_on_windows def setUp(self): super(self.__class__, self).setUp() self.tempdir = tempfile.mkdtemp('_green_pipe_test') def tearDown(self): shutil.rmtree(self.tempdir) super(self.__class__, self).tearDown() def test_pipe(self): r, w = os.pipe() rf = greenio.GreenPipe(r, 'rb') wf = greenio.GreenPipe(w, 'wb', 0) def sender(f, content): for ch in map(six.int2byte, six.iterbytes(content)): eventlet.sleep(0.0001) f.write(ch) f.close() one_line = b"12345\n" eventlet.spawn(sender, wf, one_line * 5) for i in range(5): line = rf.readline() eventlet.sleep(0.01) self.assertEqual(line, one_line) self.assertEqual(rf.readline(), b'') def test_pipe_read(self): # ensure that 'readline' works properly on GreenPipes when data is not # immediately available (fd is nonblocking, was raising EAGAIN) # also ensures that readline() terminates on '\n' and '\r\n' r, w = os.pipe() r = greenio.GreenPipe(r, 'rb') w = greenio.GreenPipe(w, 'wb') def writer(): eventlet.sleep(.1) w.write(b'line\n') w.flush() w.write(b'line\r\n') w.flush() gt = eventlet.spawn(writer) eventlet.sleep(0) line = r.readline() self.assertEqual(line, b'line\n') line = r.readline() self.assertEqual(line, b'line\r\n') gt.wait() def test_pip_read_until_end(self): # similar to test_pip_read above but reading until eof r, w = os.pipe() r = greenio.GreenPipe(r, 'rb') w = greenio.GreenPipe(w, 'wb') w.write(b'c' * DEFAULT_BUFFER_SIZE * 2) w.close() buf = r.read() # no chunk size specified; read until end 
self.assertEqual(len(buf), 2 * DEFAULT_BUFFER_SIZE) self.assertEqual(buf[:3], b'ccc') def test_pipe_read_unbuffered(self): # Ensure that setting the buffer size works properly on GreenPipes; # it used to be ignored on Python 2 and the test would hang on r.readline() # below. r, w = os.pipe() r = greenio.GreenPipe(r, 'rb', 0) w = greenio.GreenPipe(w, 'wb', 0) w.write(b'line\n') line = r.readline() self.assertEqual(line, b'line\n') r.close() w.close() def test_pipe_writes_large_messages(self): r, w = os.pipe() r = greenio.GreenPipe(r, 'rb') w = greenio.GreenPipe(w, 'wb') large_message = b"".join([1024 * six.int2byte(i) for i in range(65)]) def writer(): w.write(large_message) w.close() gt = eventlet.spawn(writer) for i in range(65): buf = r.read(1024) expected = 1024 * six.int2byte(i) self.assertEqual( buf, expected, "expected=%r..%r, found=%r..%r iter=%d" % (expected[:4], expected[-4:], buf[:4], buf[-4:], i)) gt.wait() def test_seek_on_buffered_pipe(self): f = greenio.GreenPipe(self.tempdir + "/TestFile", 'wb+', 1024) self.assertEqual(f.tell(), 0) f.seek(0, 2) self.assertEqual(f.tell(), 0) f.write(b'1234567890') f.seek(0, 2) self.assertEqual(f.tell(), 10) f.seek(0) value = f.read(1) self.assertEqual(value, b'1') self.assertEqual(f.tell(), 1) value = f.read(1) self.assertEqual(value, b'2') self.assertEqual(f.tell(), 2) f.seek(0, 1) self.assertEqual(f.readline(), b'34567890') f.seek(-5, 1) self.assertEqual(f.readline(), b'67890') f.seek(0) self.assertEqual(f.readline(), b'1234567890') f.seek(0, 2) self.assertEqual(f.readline(), b'') def test_truncate(self): f = greenio.GreenPipe(self.tempdir + "/TestFile", 'wb+', 1024) f.write(b'1234567890') f.truncate(9) self.assertEqual(f.tell(), 9) class TestGreenIoLong(tests.LimitedTestCase): TEST_TIMEOUT = 10 # the test here might take a while depending on the OS @tests.skip_with_pyevent def test_multiple_readers(self): debug.hub_prevent_multiple_readers(False) recvsize = 2 * min_buf_size() sendsize = 10 * recvsize # test that we can have multiple coroutines reading # from the same fd. We make no guarantees about which one gets which # bytes, but they should both get at least some def reader(sock, results): while True: data = sock.recv(recvsize) if not data: break results.append(data) results1 = [] results2 = [] listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listener.bind(('127.0.0.1', 0)) listener.listen(50) def server(): (sock, addr) = listener.accept() sock = bufsized(sock) try: c1 = eventlet.spawn(reader, sock, results1) c2 = eventlet.spawn(reader, sock, results2) try: c1.wait() c2.wait() finally: c1.kill() c2.kill() finally: sock.close() server_coro = eventlet.spawn(server) client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', listener.getsockname()[1])) bufsized(client, size=sendsize) # Split into multiple chunks so that we can wait a little # every iteration which allows both readers to queue and # recv some data when we actually send it. 
for i in range(20): eventlet.sleep(0.001) client.sendall(b'*' * (sendsize // 20)) client.close() server_coro.wait() listener.close() assert len(results1) > 0 assert len(results2) > 0 debug.hub_prevent_multiple_readers() def test_set_nonblocking(): sock = _orig_sock.socket(socket.AF_INET, socket.SOCK_DGRAM) fileno = sock.fileno() orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL) assert orig_flags & os.O_NONBLOCK == 0 greenio.set_nonblocking(sock) new_flags = fcntl.fcntl(fileno, fcntl.F_GETFL) # on SPARC, O_NDELAY is set as well, and it might be a superset # of O_NONBLOCK assert (new_flags == (orig_flags | os.O_NONBLOCK) or new_flags == (orig_flags | os.O_NONBLOCK | os.O_NDELAY)) def test_socket_del_fails_gracefully_when_not_fully_initialized(): # Regression introduced in da87716714689894f23d0db7b003f26d97031e83, reported in: # * GH #137 https://github.com/eventlet/eventlet/issues/137 # * https://bugs.launchpad.net/oslo.messaging/+bug/1369999 class SocketSubclass(socket.socket): def __init__(self): pass with tests.capture_stderr() as err: SocketSubclass() assert err.getvalue() == '' def test_double_close_219(): tests.run_isolated('greenio_double_close_219.py') def test_partial_write_295(): # https://github.com/eventlet/eventlet/issues/295 # `socket.makefile('w').writelines()` must send all # despite partial writes by underlying socket listen_socket = eventlet.listen(('localhost', 0)) original_accept = listen_socket.accept def talk(conn): f = conn.makefile('wb') line = b'*' * 2140 f.writelines([line] * 10000) conn.close() def accept(): connection, address = original_accept() original_send = connection.send def slow_send(b, *args): b = b[:1031] return original_send(b, *args) connection.send = slow_send eventlet.spawn(talk, connection) return connection, address listen_socket.accept = accept eventlet.spawn(listen_socket.accept) sock = eventlet.connect(listen_socket.getsockname()) with eventlet.Timeout(10): bs = sock.makefile('rb').read() assert len(bs) == 21400000 assert bs == (b'*' * 21400000) def test_socket_file_read_non_int(): listen_socket = eventlet.listen(('localhost', 0)) def server(): conn, _ = listen_socket.accept() conn.recv(1) conn.sendall(b'response') conn.close() eventlet.spawn(server) sock = eventlet.connect(listen_socket.getsockname()) fd = sock.makefile('rwb') fd.write(b'?') fd.flush() with eventlet.Timeout(1): try: fd.read("This shouldn't work") assert False except TypeError: pass def test_pipe_context(): # ensure using a pipe as a context actually closes it. 
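# (Descriptive note, illustrative only.) greenio.GreenPipe wraps a pipe file
# descriptor in a cooperative file object and, like the standard io classes,
# supports the context-manager protocol, e.g.
#     with greenio.GreenPipe(fd) as f: f.read()
# should leave f.closed == True on exit, which is what this test asserts.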
r, w = os.pipe() r = greenio.GreenPipe(r) w = greenio.GreenPipe(w, 'w') with r: pass assert r.closed and not w.closed with w as f: assert f == w assert r.closed and w.closed eventlet-0.30.2/tests/greenpool_test.py0000644000076500000240000003536314006212666020636 0ustar temotostaff00000000000000import gc import random import eventlet from eventlet import hubs, pools from eventlet.support import greenlets as greenlet import six import tests def passthru(a): eventlet.sleep(0.01) return a def passthru2(a, b): eventlet.sleep(0.01) return a, b def raiser(exc): raise exc class GreenPool(tests.LimitedTestCase): def test_spawn(self): p = eventlet.GreenPool(4) waiters = [] for i in range(10): waiters.append(p.spawn(passthru, i)) results = [waiter.wait() for waiter in waiters] self.assertEqual(results, list(range(10))) def test_spawn_n(self): p = eventlet.GreenPool(4) results_closure = [] def do_something(a): eventlet.sleep(0.01) results_closure.append(a) for i in range(10): p.spawn(do_something, i) p.waitall() self.assertEqual(results_closure, list(range(10))) def test_waiting(self): pool = eventlet.GreenPool(1) done = eventlet.Event() def consume(): done.wait() def waiter(pool): gt = pool.spawn(consume) gt.wait() waiters = [] self.assertEqual(pool.running(), 0) waiters.append(eventlet.spawn(waiter, pool)) eventlet.sleep(0) self.assertEqual(pool.waiting(), 0) waiters.append(eventlet.spawn(waiter, pool)) eventlet.sleep(0) self.assertEqual(pool.waiting(), 1) waiters.append(eventlet.spawn(waiter, pool)) eventlet.sleep(0) self.assertEqual(pool.waiting(), 2) self.assertEqual(pool.running(), 1) done.send(None) for w in waiters: w.wait() self.assertEqual(pool.waiting(), 0) self.assertEqual(pool.running(), 0) def test_multiple_coros(self): evt = eventlet.Event() results = [] def producer(): results.append('prod') evt.send() def consumer(): results.append('cons1') evt.wait() results.append('cons2') pool = eventlet.GreenPool(2) done = pool.spawn(consumer) pool.spawn_n(producer) done.wait() self.assertEqual(['cons1', 'prod', 'cons2'], results) def test_timer_cancel(self): # this test verifies that local timers are not fired # outside of the context of the spawn timer_fired = [] def fire_timer(): timer_fired.append(True) def some_work(): hubs.get_hub().schedule_call_local(0, fire_timer) pool = eventlet.GreenPool(2) worker = pool.spawn(some_work) worker.wait() eventlet.sleep(0) eventlet.sleep(0) self.assertEqual(timer_fired, []) def test_reentrant(self): pool = eventlet.GreenPool(1) def reenter(): waiter = pool.spawn(lambda a: a, 'reenter') self.assertEqual('reenter', waiter.wait()) outer_waiter = pool.spawn(reenter) outer_waiter.wait() evt = eventlet.Event() def reenter_async(): pool.spawn_n(lambda a: a, 'reenter') evt.send('done') pool.spawn_n(reenter_async) self.assertEqual('done', evt.wait()) def assert_pool_has_free(self, pool, num_free): self.assertEqual(pool.free(), num_free) def wait_long_time(e): e.wait() timer = eventlet.Timeout(1) try: evt = eventlet.Event() for x in six.moves.range(num_free): pool.spawn(wait_long_time, evt) # if the pool has fewer free than we expect, # then we'll hit the timeout error finally: timer.cancel() # if the runtime error is not raised it means the pool had # some unexpected free items timer = eventlet.Timeout(0, RuntimeError) try: self.assertRaises(RuntimeError, pool.spawn, wait_long_time, evt) finally: timer.cancel() # clean up by causing all the wait_long_time functions to return evt.send(None) eventlet.sleep(0) eventlet.sleep(0) def test_resize(self): pool = 
eventlet.GreenPool(2) evt = eventlet.Event() def wait_long_time(e): e.wait() pool.spawn(wait_long_time, evt) pool.spawn(wait_long_time, evt) self.assertEqual(pool.free(), 0) self.assertEqual(pool.running(), 2) self.assert_pool_has_free(pool, 0) # verify that the pool discards excess items put into it pool.resize(1) # cause the wait_long_time functions to return, which will # trigger puts to the pool evt.send(None) eventlet.sleep(0) eventlet.sleep(0) self.assertEqual(pool.free(), 1) self.assertEqual(pool.running(), 0) self.assert_pool_has_free(pool, 1) # resize larger and assert that there are more free items pool.resize(2) self.assertEqual(pool.free(), 2) self.assertEqual(pool.running(), 0) self.assert_pool_has_free(pool, 2) def test_pool_smash(self): # The premise is that a coroutine in a Pool tries to get a token out # of a token pool but times out before getting the token. We verify # that neither pool is adversely affected by this situation. pool = eventlet.GreenPool(1) tp = pools.TokenPool(max_size=1) tp.get() # empty out the pool def do_receive(tp): timer = eventlet.Timeout(0, RuntimeError()) try: tp.get() self.fail("Shouldn't have received anything from the pool") except RuntimeError: return 'timed out' else: timer.cancel() # the spawn makes the token pool expect that coroutine, but then # immediately cuts bait e1 = pool.spawn(do_receive, tp) self.assertEqual(e1.wait(), 'timed out') # the pool can get some random item back def send_wakeup(tp): tp.put('wakeup') gt = eventlet.spawn(send_wakeup, tp) # now we ask the pool to run something else, which should not # be affected by the previous send at all def resume(): return 'resumed' e2 = pool.spawn(resume) self.assertEqual(e2.wait(), 'resumed') # we should be able to get out the thing we put in there, too self.assertEqual(tp.get(), 'wakeup') gt.wait() def test_spawn_n_2(self): p = eventlet.GreenPool(2) self.assertEqual(p.free(), 2) r = [] def foo(a): r.append(a) gt = p.spawn(foo, 1) self.assertEqual(p.free(), 1) gt.wait() self.assertEqual(r, [1]) eventlet.sleep(0) self.assertEqual(p.free(), 2) # Once the pool is exhausted, spawning forces a yield. 
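# (Illustrative note for the sequence below, pool size 2.) The first two
# spawn_n() calls each take a free slot and return at once; the third finds
# the pool exhausted and cooperatively yields to the hub until a slot frees,
# which is why the earlier tasks have already run and their values appear in
# r by the time it returns.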
p.spawn_n(foo, 2) self.assertEqual(1, p.free()) self.assertEqual(r, [1]) p.spawn_n(foo, 3) self.assertEqual(0, p.free()) self.assertEqual(r, [1]) p.spawn_n(foo, 4) self.assertEqual(set(r), set([1, 2, 3])) eventlet.sleep(0) self.assertEqual(set(r), set([1, 2, 3, 4])) def test_exceptions(self): p = eventlet.GreenPool(2) for m in (p.spawn, p.spawn_n): self.assert_pool_has_free(p, 2) m(raiser, RuntimeError()) self.assert_pool_has_free(p, 1) p.waitall() self.assert_pool_has_free(p, 2) m(raiser, greenlet.GreenletExit) self.assert_pool_has_free(p, 1) p.waitall() self.assert_pool_has_free(p, 2) def test_imap(self): p = eventlet.GreenPool(4) result_list = list(p.imap(passthru, range(10))) self.assertEqual(result_list, list(range(10))) def test_empty_imap(self): p = eventlet.GreenPool(4) result_iter = p.imap(passthru, []) self.assertRaises(StopIteration, result_iter.next) def test_imap_nonefunc(self): p = eventlet.GreenPool(4) result_list = list(p.imap(None, range(10))) self.assertEqual(result_list, [(x,) for x in range(10)]) def test_imap_multi_args(self): p = eventlet.GreenPool(4) result_list = list(p.imap(passthru2, range(10), range(10, 20))) self.assertEqual(result_list, list(zip(range(10), range(10, 20)))) def test_imap_raises(self): # testing the case where the function raises an exception; # both that the caller sees that exception, and that the iterator # continues to be usable to get the rest of the items p = eventlet.GreenPool(4) def raiser(item): if item == 1 or item == 7: raise RuntimeError("intentional error") else: return item it = p.imap(raiser, range(10)) results = [] while True: try: results.append(six.next(it)) except RuntimeError: results.append('r') except StopIteration: break self.assertEqual(results, [0, 'r', 2, 3, 4, 5, 6, 'r', 8, 9]) def test_starmap(self): p = eventlet.GreenPool(4) result_list = list(p.starmap(passthru, [(x,) for x in range(10)])) self.assertEqual(result_list, list(range(10))) def test_waitall_on_nothing(self): p = eventlet.GreenPool() p.waitall() def test_recursive_waitall(self): p = eventlet.GreenPool() gt = p.spawn(p.waitall) self.assertRaises(AssertionError, gt.wait) class GreenPile(tests.LimitedTestCase): def test_pile(self): p = eventlet.GreenPile(4) for i in range(10): p.spawn(passthru, i) result_list = list(p) self.assertEqual(result_list, list(range(10))) def test_pile_spawn_times_out(self): p = eventlet.GreenPile(4) for i in range(4): p.spawn(passthru, i) # now it should be full and this should time out eventlet.Timeout(0) self.assertRaises(eventlet.Timeout, p.spawn, passthru, "time out") # verify that the spawn breakage didn't interrupt the sequence # and terminates properly for i in range(4, 10): p.spawn(passthru, i) self.assertEqual(list(p), list(range(10))) def test_empty_pile(self): p = eventlet.GreenPile(4) # no spawn()s # If this hangs, LimitedTestCase should time out self.assertEqual(list(p), []) def test_constructing_from_pool(self): pool = eventlet.GreenPool(2) pile1 = eventlet.GreenPile(pool) pile2 = eventlet.GreenPile(pool) def bunch_of_work(pile, unique): for i in range(10): pile.spawn(passthru, i + unique) eventlet.spawn(bunch_of_work, pile1, 0) eventlet.spawn(bunch_of_work, pile2, 100) eventlet.sleep(0) self.assertEqual(list(pile2), list(range(100, 110))) self.assertEqual(list(pile1), list(range(10))) def test_greenpool_type_check(): eventlet.GreenPool(0) eventlet.GreenPool(1) eventlet.GreenPool(1e3) with tests.assert_raises(TypeError): eventlet.GreenPool('foo') with tests.assert_raises(ValueError): eventlet.GreenPool(-1) class 
StressException(Exception): pass r = random.Random(0) def pressure(arg): while r.random() < 0.5: eventlet.sleep(r.random() * 0.001) if r.random() < 0.8: return arg else: raise StressException(arg) def passthru(arg): while r.random() < 0.5: eventlet.sleep(r.random() * 0.001) return arg class Stress(tests.LimitedTestCase): # tests will take extra-long TEST_TIMEOUT = 60 def spawn_order_check(self, concurrency): # checks that piles are strictly ordered p = eventlet.GreenPile(concurrency) def makework(count, unique): for i in six.moves.range(count): token = (unique, i) p.spawn(pressure, token) iters = 1000 eventlet.spawn(makework, iters, 1) eventlet.spawn(makework, iters, 2) eventlet.spawn(makework, iters, 3) p.spawn(pressure, (0, 0)) latest = [-1] * 4 received = 0 it = iter(p) while True: try: i = six.next(it) except StressException as exc: i = exc.args[0] except StopIteration: break received += 1 if received % 5 == 0: eventlet.sleep(0.0001) unique, order = i assert latest[unique] < order latest[unique] = order for l in latest[1:]: self.assertEqual(l, iters - 1) def test_ordering_5(self): self.spawn_order_check(5) def test_ordering_50(self): self.spawn_order_check(50) def imap_memory_check(self, concurrency): # checks that imap is strictly # ordered and consumes a constant amount of memory p = eventlet.GreenPool(concurrency) count = 1000 it = p.imap(passthru, six.moves.range(count)) latest = -1 while True: try: i = six.next(it) except StopIteration: break if latest == -1: gc.collect() initial_obj_count = len(gc.get_objects()) assert i > latest latest = i if latest % 5 == 0: eventlet.sleep(0.001) if latest % 10 == 0: gc.collect() objs_created = len(gc.get_objects()) - initial_obj_count assert objs_created < 25 * concurrency, objs_created # make sure we got to the end self.assertEqual(latest, count - 1) def test_imap_50(self): self.imap_memory_check(50) def test_imap_500(self): self.imap_memory_check(500) def test_with_intpool(self): class IntPool(pools.Pool): def create(self): self.current_integer = getattr(self, 'current_integer', 0) + 1 return self.current_integer def subtest(intpool_size, pool_size, num_executes): def run(int_pool): token = int_pool.get() eventlet.sleep(0.0001) int_pool.put(token) return token int_pool = IntPool(max_size=intpool_size) pool = eventlet.GreenPool(pool_size) for ix in six.moves.range(num_executes): pool.spawn(run, int_pool) pool.waitall() subtest(4, 7, 7) subtest(50, 75, 100) for isize in (10, 20, 30, 40, 50): for psize in (5, 25, 35, 50): subtest(isize, psize, psize) eventlet-0.30.2/tests/greenthread_test.py0000644000076500000240000001070214006212666021122 0ustar temotostaff00000000000000from tests import LimitedTestCase from eventlet import greenthread from eventlet.support import greenlets as greenlet _g_results = [] def passthru(*args, **kw): _g_results.append((args, kw)) return args, kw def waiter(a): greenthread.sleep(0.1) return a class Asserts(object): def assert_dead(self, gt): if hasattr(gt, 'wait'): self.assertRaises(greenlet.GreenletExit, gt.wait) assert gt.dead assert not gt class Spawn(LimitedTestCase, Asserts): def tearDown(self): global _g_results super(Spawn, self).tearDown() _g_results = [] def test_simple(self): gt = greenthread.spawn(passthru, 1, b=2) self.assertEqual(gt.wait(), ((1,), {'b': 2})) self.assertEqual(_g_results, [((1,), {'b': 2})]) def test_n(self): gt = greenthread.spawn_n(passthru, 2, b=3) assert not gt.dead greenthread.sleep(0) assert gt.dead self.assertEqual(_g_results, [((2,), {'b': 3})]) def test_kill(self): gt = 
greenthread.spawn(passthru, 6) greenthread.kill(gt) self.assert_dead(gt) greenthread.sleep(0.001) self.assertEqual(_g_results, []) greenthread.kill(gt) self.assert_dead(gt) def test_kill_meth(self): gt = greenthread.spawn(passthru, 6) gt.kill() self.assert_dead(gt) greenthread.sleep(0.001) self.assertEqual(_g_results, []) gt.kill() self.assert_dead(gt) def test_kill_n(self): gt = greenthread.spawn_n(passthru, 7) greenthread.kill(gt) self.assert_dead(gt) greenthread.sleep(0.001) self.assertEqual(_g_results, []) greenthread.kill(gt) self.assert_dead(gt) def test_link(self): results = [] def link_func(g, *a, **kw): results.append(g) results.append(a) results.append(kw) gt = greenthread.spawn(passthru, 5) gt.link(link_func, 4, b=5) self.assertEqual(gt.wait(), ((5,), {})) self.assertEqual(results, [gt, (4,), {'b': 5}]) def test_link_after_exited(self): results = [] def link_func(g, *a, **kw): results.append(g) results.append(a) results.append(kw) gt = greenthread.spawn(passthru, 5) self.assertEqual(gt.wait(), ((5,), {})) gt.link(link_func, 4, b=5) self.assertEqual(results, [gt, (4,), {'b': 5}]) def test_link_relinks(self): # test that linking in a linked func doesn't cause infinite recursion. called = [] def link_func(g): g.link(link_func_pass) def link_func_pass(g): called.append(True) gt = greenthread.spawn(passthru) gt.link(link_func) gt.wait() self.assertEqual(called, [True]) class SpawnAfter(Spawn): def test_basic(self): gt = greenthread.spawn_after(0.1, passthru, 20) self.assertEqual(gt.wait(), ((20,), {})) def test_cancel(self): gt = greenthread.spawn_after(0.1, passthru, 21) gt.cancel() self.assert_dead(gt) def test_cancel_already_started(self): gt = greenthread.spawn_after(0, waiter, 22) greenthread.sleep(0) gt.cancel() self.assertEqual(gt.wait(), 22) def test_kill_already_started(self): gt = greenthread.spawn_after(0, waiter, 22) greenthread.sleep(0) gt.kill() self.assert_dead(gt) class SpawnAfterLocal(LimitedTestCase, Asserts): def setUp(self): super(SpawnAfterLocal, self).setUp() self.lst = [1] def test_timer_fired(self): def func(): greenthread.spawn_after_local(0.1, self.lst.pop) greenthread.sleep(0.2) greenthread.spawn(func) assert self.lst == [1], self.lst greenthread.sleep(0.3) assert self.lst == [], self.lst def test_timer_cancelled_upon_greenlet_exit(self): def func(): greenthread.spawn_after_local(0.1, self.lst.pop) greenthread.spawn(func) assert self.lst == [1], self.lst greenthread.sleep(0.2) assert self.lst == [1], self.lst def test_spawn_is_not_cancelled(self): def func(): greenthread.spawn(self.lst.pop) # exiting immediatelly, but self.lst.pop must be called greenthread.spawn(func) greenthread.sleep(0.1) assert self.lst == [], self.lst eventlet-0.30.2/tests/hub_test.py0000644000076500000240000003162414006212666017416 0ustar temotostaff00000000000000from __future__ import with_statement import errno import fcntl import os import sys import time import tests from tests import skip_with_pyevent, skip_if_no_itimer, skip_unless import eventlet from eventlet import debug, hubs from eventlet.support import greenlets import six DELAY = 0.001 def noop(): pass class TestTimerCleanup(tests.LimitedTestCase): TEST_TIMEOUT = 2 @skip_with_pyevent def test_cancel_immediate(self): hub = hubs.get_hub() stimers = hub.get_timers_count() scanceled = hub.timers_canceled for i in six.moves.range(2000): t = hubs.get_hub().schedule_call_global(60, noop) t.cancel() self.assert_less_than_equal(hub.timers_canceled, hub.get_timers_count() + 1) # there should be fewer than 1000 new timers and 
canceled self.assert_less_than_equal(hub.get_timers_count(), 1000 + stimers) self.assert_less_than_equal(hub.timers_canceled, 1000) @skip_with_pyevent def test_cancel_accumulated(self): hub = hubs.get_hub() stimers = hub.get_timers_count() scanceled = hub.timers_canceled for i in six.moves.range(2000): t = hubs.get_hub().schedule_call_global(60, noop) eventlet.sleep() self.assert_less_than_equal(hub.timers_canceled, hub.get_timers_count() + 1) t.cancel() self.assert_less_than_equal(hub.timers_canceled, hub.get_timers_count() + 1, hub.timers) # there should be fewer than 1000 new timers and canceled self.assert_less_than_equal(hub.get_timers_count(), 1000 + stimers) self.assert_less_than_equal(hub.timers_canceled, 1000) @skip_with_pyevent def test_cancel_proportion(self): # if fewer than half the pending timers are canceled, it should # not clean them out hub = hubs.get_hub() uncanceled_timers = [] stimers = hub.get_timers_count() scanceled = hub.timers_canceled for i in six.moves.range(1000): # 2/3rds of new timers are uncanceled t = hubs.get_hub().schedule_call_global(60, noop) t2 = hubs.get_hub().schedule_call_global(60, noop) t3 = hubs.get_hub().schedule_call_global(60, noop) eventlet.sleep() self.assert_less_than_equal(hub.timers_canceled, hub.get_timers_count() + 1) t.cancel() self.assert_less_than_equal(hub.timers_canceled, hub.get_timers_count() + 1) uncanceled_timers.append(t2) uncanceled_timers.append(t3) # 3000 new timers, plus a few extras self.assert_less_than_equal(stimers + 3000, stimers + hub.get_timers_count()) self.assertEqual(hub.timers_canceled, 1000) for t in uncanceled_timers: t.cancel() self.assert_less_than_equal(hub.timers_canceled, hub.get_timers_count()) eventlet.sleep() class TestMultipleListenersCleanup(tests.LimitedTestCase): def setUp(self): super(TestMultipleListenersCleanup, self).setUp() debug.hub_prevent_multiple_readers(False) debug.hub_exceptions(False) def tearDown(self): super(TestMultipleListenersCleanup, self).tearDown() debug.hub_prevent_multiple_readers(True) debug.hub_exceptions(True) def test_cleanup(self): r, w = os.pipe() self.addCleanup(os.close, r) self.addCleanup(os.close, w) fcntl.fcntl(r, fcntl.F_SETFL, fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK) def readfd(fd): while True: try: return os.read(fd, 1) except OSError as e: if e.errno != errno.EAGAIN: raise hubs.trampoline(fd, read=True) first_listener = eventlet.spawn(readfd, r) eventlet.sleep() second_listener = eventlet.spawn(readfd, r) eventlet.sleep() hubs.get_hub().schedule_call_global(0, second_listener.throw, eventlet.Timeout(None)) eventlet.sleep() os.write(w, b'.') self.assertEqual(first_listener.wait(), b'.') class TestScheduleCall(tests.LimitedTestCase): def test_local(self): lst = [1] eventlet.spawn(hubs.get_hub().schedule_call_local, DELAY, lst.pop) eventlet.sleep(0) eventlet.sleep(DELAY * 2) assert lst == [1], lst def test_global(self): lst = [1] eventlet.spawn(hubs.get_hub().schedule_call_global, DELAY, lst.pop) eventlet.sleep(0) eventlet.sleep(DELAY * 2) assert lst == [], lst def test_ordering(self): lst = [] hubs.get_hub().schedule_call_global(DELAY * 2, lst.append, 3) hubs.get_hub().schedule_call_global(DELAY, lst.append, 1) hubs.get_hub().schedule_call_global(DELAY, lst.append, 2) while len(lst) < 3: eventlet.sleep(DELAY) self.assertEqual(lst, [1, 2, 3]) class TestDebug(tests.LimitedTestCase): def test_debug_listeners(self): hubs.get_hub().set_debug_listeners(True) hubs.get_hub().set_debug_listeners(False) def test_timer_exceptions(self): 
hubs.get_hub().set_timer_exceptions(True) hubs.get_hub().set_timer_exceptions(False) class TestExceptionInMainloop(tests.LimitedTestCase): def test_sleep(self): # even if there was an error in the mainloop, the hub should continue # to work start = time.time() eventlet.sleep(DELAY) delay = time.time() - start assert delay >= DELAY * \ 0.9, 'sleep returned after %s seconds (was scheduled for %s)' % ( delay, DELAY) def fail(): 1 // 0 hubs.get_hub().schedule_call_global(0, fail) start = time.time() eventlet.sleep(DELAY) delay = time.time() - start assert delay >= DELAY * \ 0.9, 'sleep returned after %s seconds (was scheduled for %s)' % ( delay, DELAY) class TestExceptionInGreenthread(tests.LimitedTestCase): @skip_unless(greenlets.preserves_excinfo) def test_exceptionpreservation(self): # events for controlling execution order gt1event = eventlet.Event() gt2event = eventlet.Event() def test_gt1(): try: raise KeyError() except KeyError: gt1event.send('exception') gt2event.wait() assert sys.exc_info()[0] is KeyError gt1event.send('test passed') def test_gt2(): gt1event.wait() gt1event.reset() assert sys.exc_info()[0] is None try: raise ValueError() except ValueError: gt2event.send('exception') gt1event.wait() assert sys.exc_info()[0] is ValueError g1 = eventlet.spawn(test_gt1) g2 = eventlet.spawn(test_gt2) try: g1.wait() g2.wait() finally: g1.kill() g2.kill() def test_exceptionleaks(self): # tests expected behaviour with all versions of greenlet def test_gt(sem): try: raise KeyError() except KeyError: sem.release() hubs.get_hub().switch() # semaphores for controlling execution order sem = eventlet.Semaphore() sem.acquire() g = eventlet.spawn(test_gt, sem) try: sem.acquire() assert sys.exc_info()[0] is None finally: g.kill() class TestHubBlockingDetector(tests.LimitedTestCase): TEST_TIMEOUT = 10 @skip_with_pyevent def test_block_detect(self): def look_im_blocking(): import time time.sleep(2) from eventlet import debug debug.hub_blocking_detection(True) gt = eventlet.spawn(look_im_blocking) self.assertRaises(RuntimeError, gt.wait) debug.hub_blocking_detection(False) @skip_with_pyevent @skip_if_no_itimer def test_block_detect_with_itimer(self): def look_im_blocking(): import time time.sleep(0.5) from eventlet import debug debug.hub_blocking_detection(True, resolution=0.1) gt = eventlet.spawn(look_im_blocking) self.assertRaises(RuntimeError, gt.wait) debug.hub_blocking_detection(False) class TestSuspend(tests.LimitedTestCase): TEST_TIMEOUT = 4 longMessage = True maxDiff = None def test_suspend_doesnt_crash(self): import os import shutil import signal import subprocess import sys import tempfile self.tempdir = tempfile.mkdtemp('test_suspend') filename = os.path.join(self.tempdir, 'test_suspend.py') fd = open(filename, "w") fd.write("""import eventlet eventlet.Timeout(0.5) try: eventlet.listen(("127.0.0.1", 0)).accept() except eventlet.Timeout: print("exited correctly") """) fd.close() python_path = os.pathsep.join(sys.path + [self.tempdir]) new_env = os.environ.copy() new_env['PYTHONPATH'] = python_path p = subprocess.Popen([sys.executable, os.path.join(self.tempdir, filename)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=new_env) eventlet.sleep(0.4) # wait for process to hit accept os.kill(p.pid, signal.SIGSTOP) # suspend and resume to generate EINTR os.kill(p.pid, signal.SIGCONT) output, _ = p.communicate() lines = output.decode('utf-8', 'replace').splitlines() assert "exited correctly" in lines[-1], output shutil.rmtree(self.tempdir) def test_repeated_select_bad_fd(): from eventlet.green 
import select def once(): try: select.select([-1], [], []) assert False, 'Expected ValueError' except ValueError: pass once() once() @skip_with_pyevent def test_fork(): tests.run_isolated('hub_fork.py') def test_fork_simple(): tests.run_isolated('hub_fork_simple.py') class TestDeadRunLoop(tests.LimitedTestCase): TEST_TIMEOUT = 2 class CustomException(Exception): pass def test_kill(self): """ Checks that killing a process after the hub runloop dies does not immediately return to hub greenlet's parent and schedule a redundant timer. """ hub = hubs.get_hub() def dummyproc(): hub.switch() g = eventlet.spawn(dummyproc) eventlet.sleep(0) # let dummyproc run assert hub.greenlet.parent == eventlet.greenthread.getcurrent() self.assertRaises(KeyboardInterrupt, hub.greenlet.throw, KeyboardInterrupt()) # kill dummyproc, this schedules a timer to return execution to # this greenlet before throwing an exception in dummyproc. # it is from this timer that execution should be returned to this # greenlet, and not by propogating of the terminating greenlet. g.kill() with eventlet.Timeout(0.5, self.CustomException()): # we now switch to the hub, there should be no existing timers # that switch back to this greenlet and so this hub.switch() # call should block indefinitely. self.assertRaises(self.CustomException, hub.switch) def test_parent(self): """ Checks that a terminating greenthread whose parent was a previous, now-defunct hub greenlet returns execution to the hub runloop and not the hub greenlet's parent. """ hub = hubs.get_hub() def dummyproc(): pass g = eventlet.spawn(dummyproc) assert hub.greenlet.parent == eventlet.greenthread.getcurrent() self.assertRaises(KeyboardInterrupt, hub.greenlet.throw, KeyboardInterrupt()) assert not g.dead # check dummyproc hasn't completed with eventlet.Timeout(0.5, self.CustomException()): # we now switch to the hub which will allow # completion of dummyproc. # this should return execution back to the runloop and not # this greenlet so that hub.switch() would block indefinitely. 
self.assertRaises(self.CustomException, hub.switch) assert g.dead # sanity check that dummyproc has completed def test_use_hub_class(): tests.run_isolated('hub_use_hub_class.py') def test_kqueue_unsupported(): # https://github.com/eventlet/eventlet/issues/38 # get_hub on windows broken by kqueue tests.run_isolated('hub_kqueue_unsupported.py') eventlet-0.30.2/tests/isolated/0000755000076500000240000000000014017673044017030 5ustar temotostaff00000000000000eventlet-0.30.2/tests/isolated/__init__.py0000644000076500000240000000000014006212666021124 0ustar temotostaff00000000000000eventlet-0.30.2/tests/isolated/env_tpool_negative.py0000644000076500000240000000032414006212666023265 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': from eventlet import tpool def do(): print("should not get here") try: tpool.execute(do) except AssertionError: print('pass') eventlet-0.30.2/tests/isolated/env_tpool_size.py0000644000076500000240000000114314006212666022435 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import sys import time from eventlet import tpool import eventlet current = [0] highwater = [0] def count(): current[0] += 1 time.sleep(0.01) if current[0] > highwater[0]: highwater[0] = current[0] current[0] -= 1 expected = int(sys.argv[1]) normal = int(sys.argv[2]) p = eventlet.GreenPool() for i in range(expected * 2): p.spawn(tpool.execute, count) p.waitall() assert highwater[0] > normal, "Highwater %s <= %s" % (highwater[0], normal) print('pass') eventlet-0.30.2/tests/isolated/env_tpool_zero.py0000644000076500000240000000101714006212666022442 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import warnings from eventlet import tpool g = [False] def do(): g[0] = True with warnings.catch_warnings(record=True) as ws: warnings.simplefilter('always', category=RuntimeWarning) tpool.execute(do) msgs = [str(w) for w in ws] assert len(ws) == 1, msgs msg = str(ws[0].message) assert 'Zero threads in tpool' in msg assert 'EVENTLET_THREADPOOL_SIZE' in msg assert g[0] print('pass') eventlet-0.30.2/tests/isolated/green_http_doesnt_change_original_module.py0000644000076500000240000000053614006212666027654 0ustar temotostaff00000000000000if __name__ == '__main__': # Importing eventlet.green.http.client after http.client was already imported # used to change the original http/http.client, that was breaking things. import http.client original_id = id(http.client) import eventlet.green.http.client # noqa assert id(http.client) == original_id print('pass') eventlet-0.30.2/tests/isolated/green_httplib_doesnt_change_original_module.py0000644000076500000240000000052614006212666030342 0ustar temotostaff00000000000000if __name__ == '__main__': # Importing eventlet.green.httplib after http.client was already imported # used to change the original http/http.client, that was breaking things. 
import http.client original_id = id(http.client) import eventlet.green.httplib # noqa assert id(http.client) == original_id print('pass') eventlet-0.30.2/tests/isolated/green_ssl_py36_properties.py0000644000076500000240000000055214006212666024517 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() try: eventlet.wrap_ssl( eventlet.listen(('localhost', 0)), certfile='does-not-exist', keyfile='does-not-exist', server_side=True) except IOError as ex: assert ex.errno == 2 print('pass') eventlet-0.30.2/tests/isolated/greendns_from_address_203.py0000644000076500000240000000030114006212666024312 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet from dns import reversename eventlet.monkey_patch(all=True) reversename.from_address('127.0.0.1') print('pass') eventlet-0.30.2/tests/isolated/greendns_import_rdtypes_then_eventlet.py0000644000076500000240000000036514006212666027300 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import dns.rdtypes import eventlet.support.greendns # AttributeError: 'module' object has no attribute 'dnskeybase' # https://github.com/eventlet/eventlet/issues/479 print('pass') eventlet-0.30.2/tests/isolated/greenio_double_close_219.py0000644000076500000240000000111314006212666024135 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() import subprocess import gc p = subprocess.Popen(['ls'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # the following line creates a _SocketDuckForFd(3) and .close()s it, but the # object has not been collected by the GC yet p.communicate() f = open('/dev/null', 'rb') # f.fileno() == 3 gc.collect() # this calls the __del__ of _SocketDuckForFd(3), close()ing it again f.close() # OSError, because the fd 3 has already been closed print('pass') eventlet-0.30.2/tests/isolated/hub_fork.py0000644000076500000240000000174214006212666021202 0ustar temotostaff00000000000000# verify eventlet.listen() accepts in forked children __test__ = False if __name__ == '__main__': import os import sys import eventlet server = eventlet.listen(('127.0.0.1', 0)) result = eventlet.with_timeout(0.01, server.accept, timeout_value=True) assert result is True, 'Expected timeout' pid = os.fork() if pid < 0: print('fork error') sys.exit(1) elif pid == 0: with eventlet.Timeout(1): sock, _ = server.accept() sock.sendall('ok {0}'.format(os.getpid()).encode()) sock.close() sys.exit(0) elif pid > 0: with eventlet.Timeout(1): sock = eventlet.connect(server.getsockname()) data = sock.recv(20).decode() assert data.startswith('ok ') spid = int(data[3:].strip()) assert spid == pid kpid, status = os.wait() assert kpid == pid assert status == 0 print('pass') eventlet-0.30.2/tests/isolated/hub_fork_simple.py0000644000076500000240000000251514006212666022552 0ustar temotostaff00000000000000import os import signal import sys import tempfile __test__ = False def parent(signal_path, pid): eventlet.Timeout(5) port = None while True: try: contents = open(signal_path, 'rb').read() port = int(contents.strip()) break except Exception: eventlet.sleep(0.1) eventlet.connect(('127.0.0.1', port)) while True: try: contents = open(signal_path, 'rb').read() result = contents.split()[1] break except Exception: eventlet.sleep(0.1) assert result == b'done', repr(result) print('pass') def child(signal_path): eventlet.Timeout(5) s = eventlet.listen(('127.0.0.1', 0)) with open(signal_path, 'wb') as f: 
f.write(str(s.getsockname()[1]).encode() + b'\n') f.flush() s.accept() f.write(b'done\n') f.flush() if __name__ == '__main__': import eventlet with tempfile.NamedTemporaryFile() as signal_file: signal_path = signal_file.name pid = os.fork() if pid < 0: sys.stderr.write('fork error\n') sys.exit(1) elif pid == 0: child(signal_path) sys.exit(0) elif pid > 0: try: parent(signal_path, pid) except Exception: os.kill(pid, signal.SIGTERM) eventlet-0.30.2/tests/isolated/hub_kqueue_unsupported.py0000644000076500000240000000152314006212666024205 0ustar temotostaff00000000000000from __future__ import print_function __test__ = False def delattr_silent(x, name): try: delattr(x, name) except AttributeError: pass if __name__ == '__main__': # Simulate absence of kqueue even on platforms that support it. import select delattr_silent(select, 'kqueue') delattr_silent(select, 'KQ_FILTER_READ') # patcher.original used in hub may reimport and return deleted kqueue attribute import eventlet.patcher select_original = eventlet.patcher.original('select') delattr_silent(select_original, 'kqueue') delattr_silent(select_original, 'KQ_FILTER_READ') import eventlet.hubs default = eventlet.hubs.get_default_hub() assert not default.__name__.endswith('kqueue') import eventlet.hubs.kqueue assert not eventlet.hubs.kqueue.is_available() print('pass') eventlet-0.30.2/tests/isolated/hub_use_hub_class.py0000644000076500000240000000041014006212666023047 0ustar temotostaff00000000000000from __future__ import print_function __test__ = False class Foo(object): pass if __name__ == '__main__': import eventlet.hubs eventlet.hubs.use_hub(Foo) hub = eventlet.hubs.get_hub() assert isinstance(hub, Foo), repr(hub) print('pass') eventlet-0.30.2/tests/isolated/mysqldb_monkey_patch.py0000644000076500000240000000074514006212666023621 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import MySQLdb as m from eventlet import patcher from eventlet.green import MySQLdb as gm patcher.monkey_patch(all=True, MySQLdb=True) patched_set = set(patcher.already_patched) - set(['psycopg']) assert patched_set == frozenset([ 'MySQLdb', 'os', 'select', 'socket', 'subprocess', 'thread', 'time', ]) assert m.connect == gm.connect print('pass') eventlet-0.30.2/tests/isolated/patcher_blocking_select_methods_are_deleted.py0000644000076500000240000000160314006212666030274 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() # Leaving unpatched select methods in the select module is a recipe # for trouble and this test makes sure we don't do that. 
# # Issues: # * https://bitbucket.org/eventlet/eventlet/issues/167 # * https://github.com/eventlet/eventlet/issues/169 import select for name in ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']: assert not hasattr(select, name), name import sys if sys.version_info >= (3, 4): import selectors for name in [ 'PollSelector', 'EpollSelector', 'DevpollSelector', 'KqueueSelector', ]: assert not hasattr(selectors, name), name default = selectors.DefaultSelector assert default is selectors.SelectSelector, default print('pass') eventlet-0.30.2/tests/isolated/patcher_builtin.py0000644000076500000240000000074014006212666022554 0ustar temotostaff00000000000000if __name__ == '__main__': from tests.mock import patch import sys import eventlet from eventlet import hubs with patch.object(hubs, 'notify_opened') as mock_func: eventlet.monkey_patch(builtins=True) with open(__file__, 'r') as f: mock_func.assert_called_with(f.fileno()) if sys.version_info.major == 2: with file(__file__, 'r') as f: mock_func.assert_called_with(f.fileno()) print('pass') eventlet-0.30.2/tests/isolated/patcher_existing_locks_early.py0000644000076500000240000000075114006212666025331 0ustar temotostaff00000000000000__test__ = False def aaa(lock, e1, e2): e1.set() with lock: e2.wait() def bbb(lock, e1, e2): e1.wait() e2.set() with lock: pass if __name__ == '__main__': import threading test_lock = threading.RLock() import eventlet eventlet.monkey_patch() e1, e2 = threading.Event(), threading.Event() a = eventlet.spawn(aaa, test_lock, e1, e2) b = eventlet.spawn(bbb, test_lock, e1, e2) a.wait() b.wait() print('pass') eventlet-0.30.2/tests/isolated/patcher_existing_locks_late.py0000644000076500000240000000075114006212666025142 0ustar temotostaff00000000000000__test__ = False def aaa(lock, e1, e2): e1.set() with lock: e2.wait() def bbb(lock, e1, e2): e1.wait() e2.set() with lock: pass if __name__ == '__main__': import threading import eventlet eventlet.monkey_patch() test_lock = threading.RLock() e1, e2 = threading.Event(), threading.Event() a = eventlet.spawn(aaa, test_lock, e1, e2) b = eventlet.spawn(bbb, test_lock, e1, e2) a.wait() b.wait() print('pass') eventlet-0.30.2/tests/isolated/patcher_existing_locks_locked.py0000644000076500000240000000154114006212666025454 0ustar temotostaff00000000000000__test__ = False def take(lock, sync1, sync2): sync2.acquire() sync1.release() with lock: sync2.release() if __name__ == '__main__': import sys import threading lock = threading.RLock() lock.acquire() import eventlet eventlet.monkey_patch() lock.release() try: lock.release() except RuntimeError as e: assert e.args == ('cannot release un-acquired lock',) lock.acquire() sync1 = threading.Lock() sync2 = threading.Lock() sync1.acquire() eventlet.spawn(take, lock, sync1, sync2) # Ensure sync2 has been taken with sync1: pass # an RLock should be reentrant lock.acquire() lock.release() lock.release() # To acquire sync2, 'take' must have acquired lock, which has been locked # until now sync2.acquire() print('pass') eventlet-0.30.2/tests/isolated/patcher_existing_locks_unlocked.py0000644000076500000240000000070114006212666026014 0ustar temotostaff00000000000000__test__ = False def take(lock, e1, e2): with lock: e1.set() e2.wait() if __name__ == '__main__': import sys import threading lock = threading.RLock() import eventlet eventlet.monkey_patch() lock.acquire() lock.release() e1, e2 = threading.Event(), threading.Event() eventlet.spawn(take, lock, e1, e2) e1.wait() assert not lock.acquire(blocking=0) e2.set() print('pass') 
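# --- Editor's illustrative sketch; not part of the eventlet distribution ---
# The "existing locks" tests above all exercise the same guarantee: a
# threading.RLock created *before* eventlet.monkey_patch() must keep providing
# correct locking for green threads afterwards. A minimal, hedged version of
# that pattern, using only the public eventlet API (monkey_patch, spawn) and
# the standard library, might look like this:
import threading

pre_patch_lock = threading.RLock()   # lock created before patching

import eventlet
eventlet.monkey_patch()              # patching must not break the existing lock


def worker():
    # green thread acquires and releases the pre-patch lock
    with pre_patch_lock:
        pass


gt = eventlet.spawn(worker)
with pre_patch_lock:                 # main green thread also takes it
    pass
gt.wait()
print('pass')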
eventlet-0.30.2/tests/isolated/patcher_fork_after_monkey_patch.py0000644000076500000240000000360114006212666025770 0ustar temotostaff00000000000000# Monkey patching interferes with threading in Python 3.7 # https://github.com/eventlet/eventlet/issues/592 __test__ = False def check(n, mod, tag): assert len(mod._active) == n, 'Expected {} {} threads, got {}'.format(n, tag, mod._active) if __name__ == '__main__': import eventlet import eventlet.patcher eventlet.monkey_patch() import os import sys import threading _threading = eventlet.patcher.original('threading') import eventlet.green.threading def target(): eventlet.sleep(0.1) threads = [ threading.Thread(target=target, name='patched'), _threading.Thread(target=target, name='original-1'), _threading.Thread(target=target, name='original-2'), eventlet.green.threading.Thread(target=target, name='green-1'), eventlet.green.threading.Thread(target=target, name='green-2'), eventlet.green.threading.Thread(target=target, name='green-3'), ] for t in threads: t.start() check(2, threading, 'pre-fork patched') check(3, _threading, 'pre-fork original') check(4, eventlet.green.threading, 'pre-fork green') if os.fork() == 0: # Inside the child, we should only have a main thread, # but old pythons make it difficult to ensure if sys.version_info >= (3, 7): check(1, threading, 'child post-fork patched') check(1, _threading, 'child post-fork original') check(1, eventlet.green.threading, 'child post-fork green') sys.exit() else: os.wait() check(2, threading, 'post-fork patched') check(3, _threading, 'post-fork original') check(4, eventlet.green.threading, 'post-fork green') for t in threads: t.join() check(1, threading, 'post-join patched') check(1, _threading, 'post-join original') check(1, eventlet.green.threading, 'post-join green') print('pass') eventlet-0.30.2/tests/isolated/patcher_import_patched_defaults.py0000644000076500000240000000123214006212666025774 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import sys # On eventlet<=0.20.0 uncommenting this unpatched import fails test # because import_patched did not agressively repatch sub-imported modules cached in sys.modules # to be fixed in https://github.com/eventlet/eventlet/issues/368 # import tests.patcher.shared_import_socket import eventlet target = eventlet.import_patched('tests.patcher.shared1').shared t = target.socket.socket import eventlet.green.socket as g if not issubclass(t, g.socket): print('Fail. 
Target socket not green: {0} bases {1}'.format(t, t.__bases__)) sys.exit(1) print('pass') eventlet-0.30.2/tests/isolated/patcher_importlib_lock.py0000644000076500000240000000074214006212666024121 0ustar temotostaff00000000000000__test__ = False def do_import(): import encodings.idna if __name__ == '__main__': import sys import eventlet eventlet.monkey_patch() threading = eventlet.patcher.original('threading') sys.modules.pop('encodings.idna', None) # call "import encodings.idna" in a new thread thread = threading.Thread(target=do_import) thread.start() # call "import encodings.idna" in the main thread do_import() thread.join() print('pass') eventlet-0.30.2/tests/isolated/patcher_open_kwargs.py0000644000076500000240000000030614006212666023423 0ustar temotostaff00000000000000__test__ = False if __name__ == "__main__": import eventlet eventlet.monkey_patch(builtins=True, os=True) with open(__file__, mode="rt", buffering=16): pass print("pass") eventlet-0.30.2/tests/isolated/patcher_socketserver_selectors.py0000644000076500000240000000202514006212666025706 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() from six.moves.BaseHTTPServer import ( HTTPServer, BaseHTTPRequestHandler, ) import threading server = HTTPServer(('localhost', 0), BaseHTTPRequestHandler) thread = threading.Thread(target=server.serve_forever) # Before fixing it the code would never go pass this line because: # * socketserver.BaseServer that's used behind the scenes here uses # selectors.PollSelector if it's available and we don't have green poll # implementation so this just couldn't work # * making socketserver use selectors.SelectSelector wasn't enough as # until now we just failed to monkey patch selectors module # # Due to the issues above this thread.start() call effectively behaved # like calling server.serve_forever() directly in the current thread # # Original report: https://github.com/eventlet/eventlet/issues/249 thread.start() server.shutdown() print('pass') eventlet-0.30.2/tests/isolated/patcher_threading_condition.py0000644000076500000240000000057614006212666025130 0ustar temotostaff00000000000000# Issue #185: test threading.Condition with monkey-patching __test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() import threading def func(c): with c: c.notify() c = threading.Condition() with c: t = threading.Thread(target=func, args=(c,)) t.start() c.wait() print('pass') eventlet-0.30.2/tests/isolated/patcher_threading_current.py0000644000076500000240000000104714006212666024616 0ustar temotostaff00000000000000# Threading.current_thread does not change when using greenthreads? 
# https://github.com/eventlet/eventlet/issues/172 __test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() import threading g = set() def fun(): ct = threading.current_thread() g.add(ct.name) ts = tuple(threading.Thread(target=fun, name='t{}'.format(i)) for i in range(3)) for t in ts: t.start() for t in ts: t.join() assert g == set(('t0', 't1', 't2')), repr(g) print('pass') eventlet-0.30.2/tests/isolated/patcher_threading_join.py0000644000076500000240000000066314006212666024076 0ustar temotostaff00000000000000# Issue #223: test threading.Thread.join with monkey-patching __test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() import threading import time sleeper = threading.Thread(target=time.sleep, args=(1,)) start = time.time() sleeper.start() sleeper.join() dt = time.time() - start if dt < 1.0: raise Exception("test failed: dt=%s" % dt) print('pass') eventlet-0.30.2/tests/isolated/patcher_threadpoolexecutor.py0000644000076500000240000000100414006212666025020 0ustar temotostaff00000000000000# Issue #508: test ThreadPoolExecutor with monkey-patching __test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() import sys # Futures is only included in 3.2 or later if sys.version_info >= (3, 2): from concurrent import futures with futures.ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(pow, 2, 3) res = future.result() assert res == 8, '2^3 should be 8, not %s' % res print('pass') eventlet-0.30.2/tests/isolated/regular_file_readall.py0000644000076500000240000000214414006212666023524 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch() import six import io import os import tempfile with tempfile.NamedTemporaryFile() as tmp: with io.open(tmp.name, "wb") as fp: fp.write(b"content") # test BufferedReader.read() fd = os.open(tmp.name, os.O_RDONLY) fp = os.fdopen(fd, "rb") with fp: content = fp.read() assert content == b'content' # test FileIO.read() fd = os.open(tmp.name, os.O_RDONLY) fp = os.fdopen(fd, "rb", 0) with fp: content = fp.read() assert content == b'content' if six.PY3: # test FileIO.readall() fd = os.open(tmp.name, os.O_RDONLY) fp = os.fdopen(fd, "rb", 0) with fp: content = fp.readall() assert content == b'content' # test FileIO.readall() (for Python 2 and Python 3) with io.open(tmp.name, "rb", 0) as fp: content = fp.readall() assert content == b'content' print("pass") eventlet-0.30.2/tests/isolated/socket_resolve_green.py0000644000076500000240000000267414006212666023617 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet eventlet.monkey_patch(all=True) import socket import time import dns.message import dns.query n = 10 delay = 0.01 addr_map = {'test-host{0}.'.format(i): '0.0.1.{0}'.format(i) for i in range(n)} def slow_udp(q, *a, **kw): qname = q.question[0].name addr = addr_map[qname.to_text()] r = dns.message.make_response(q) r.index = None r.flags = 256 r.answer.append(dns.rrset.from_text(str(qname), 60, 'IN', 'A', addr)) r.time = 0.001 eventlet.sleep(delay) return r dns.query.tcp = lambda: eventlet.Timeout(0) dns.query.udp = slow_udp results = {} def fun(name): try: results[name] = socket.gethostbyname(name) except socket.error as e: print('name: {0} error: {1}'.format(name, e)) pool = eventlet.GreenPool(size=n + 1) # FIXME: For unknown reason, first GreenPool.spawn() takes ~250ms on some platforms. 
# Spawned function executes for normal expected time, it's the GreenPool who needs warmup. pool.spawn(eventlet.sleep) t1 = time.time() for name in addr_map: pool.spawn(fun, name) pool.waitall() td = time.time() - t1 fail_msg = 'Resolve time expected: ~{0:.3f}s, real: {1:.3f}'.format(delay, td) assert delay <= td < delay * n, fail_msg assert addr_map == results print('pass') eventlet-0.30.2/tests/isolated/subprocess_exception_identity.py0000644000076500000240000000052614006212666025561 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import subprocess as original from eventlet.green import subprocess as green cases = ( 'CalledProcessError', 'TimeoutExpired', ) for c in cases: if hasattr(original, c): assert getattr(green, c) is getattr(original, c), c print('pass') eventlet-0.30.2/tests/isolated/subprocess_patched_communicate.py0000644000076500000240000000041414006212666025642 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import sys import eventlet import subprocess eventlet.monkey_patch(all=True) p = subprocess.Popen([sys.executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() print('pass') eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/0000755000076500000240000000000014017673044025775 5ustar temotostaff00000000000000eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/__init__.py0000644000076500000240000000000014006212666030071 0ustar temotostaff00000000000000eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/sample_main_module/0000755000076500000240000000000014017673044031627 5ustar temotostaff00000000000000eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/sample_main_module/__init__.py0000644000076500000240000000041214006212666033732 0ustar temotostaff00000000000000""" This file is used together with sample_sub_module/__init__.py to setup a scenario where symbols are imported from sub modules. It is used to test that pacher.import_patched can correctly patch such symbols. """ from .sample_sub_module import function_use_socket././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/sample_main_module/sample_sub_module/eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/sample_main_module/sample_sub_modul0000755000076500000240000000000014017673044035102 5ustar temotostaff00000000000000././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/sample_main_module/sample_sub_module/__init__.pyeventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/sample_main_module/sample_sub_modul0000644000076500000240000000042514006212666035102 0ustar temotostaff00000000000000""" This file is used together with sample_main_module/__init__.py to setup a scenario where symbols are imported from sub modules. It is used to test that pacher.import_patched can correctly patch such symbols. 
""" import socket def function_use_socket(): return socket eventlet-0.30.2/tests/isolated/test_sub_module_in_import_patched/test.py0000644000076500000240000000065114006212666027325 0ustar temotostaff00000000000000import eventlet import tests.isolated.test_sub_module_in_import_patched.sample_main_module as test_module if __name__ == '__main__': patched_module = eventlet.import_patched( 'tests.isolated.test_sub_module_in_import_patched.sample_main_module') assert patched_module.function_use_socket() is eventlet.green.socket assert test_module.function_use_socket() is not eventlet.green.socket print('pass') eventlet-0.30.2/tests/isolated/tpool_exception_leak.py0000644000076500000240000000213314006212666023605 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet import eventlet.tpool import gc import pprint class RequiredException(Exception): pass class A(object): def ok(self): return 'ok' def err(self): raise RequiredException a = A() # case 1 no exception assert eventlet.tpool.Proxy(a).ok() == 'ok' # yield to tpool_trampoline(), otherwise e.send(rv) have a reference eventlet.sleep(0.1) gc.collect() refs = gc.get_referrers(a) assert len(refs) == 1, 'tpool.Proxy-ied object leaked: {}'.format(pprint.pformat(refs)) # case 2 with exception def test_exception(): try: eventlet.tpool.Proxy(a).err() assert False, 'expected exception' except RequiredException: pass test_exception() # yield to tpool_trampoline(), otherwise e.send(rv) have a reference eventlet.sleep(0.1) gc.collect() refs = gc.get_referrers(a) assert len(refs) == 1, 'tpool.Proxy-ied object leaked: {}'.format(pprint.pformat(refs)) print('pass') eventlet-0.30.2/tests/isolated/tpool_isolate_socket_default_timeout.py0000644000076500000240000000043414006212666027077 0ustar temotostaff00000000000000__test__ = False if __name__ == '__main__': import eventlet import eventlet.tpool import socket def do(): eventlet.sleep(0.2) return True socket.setdefaulttimeout(0.05) result = eventlet.tpool.execute(do) assert result print('pass') eventlet-0.30.2/tests/isolated/wsgi_connection_timeout.py0000644000076500000240000001273514006212666024345 0ustar temotostaff00000000000000"""Issue #143 - Socket timeouts in wsgi server not caught. https://bitbucket.org/eventlet/eventlet/issue/143/ This file intentionally ignored by nose. Caller process (tests.wsgi_test.TestWsgiConnTimeout) handles success / failure Simulate server connection socket timeout without actually waiting. 
Logs 'timed out' if server debug=True (similar to 'accepted' logging) FAIL: if log (ie, _spawn_n_impl 'except:' catches timeout, logs TB) NOTE: timeouts are NOT on server_sock, but on the conn sockets produced by the socket.accept() call server's socket.listen() sock - NaughtySocketAcceptWrap / | \ | | | (1 - many) V V V server / client accept() conn - ExplodingConnectionWrap / | \ | | | (1 - many) V V V connection makefile() file objects - ExplodingSocketFile <-- these raise """ import socket import eventlet import six import tests.wsgi_test # no standard tests in this file, ignore __test__ = False TAG_BOOM = "=== ~* BOOM *~ ===" output_buffer = [] class BufferLog(object): @staticmethod def write(s): output_buffer.append(s.rstrip()) return len(s) # This test might make you wince class NaughtySocketAcceptWrap(object): # server's socket.accept(); patches resulting connection sockets def __init__(self, sock): self.sock = sock self.sock._really_accept = self.sock.accept self.sock.accept = self self.conn_reg = [] def unwrap(self): self.sock.accept = self.sock._really_accept del self.sock._really_accept for conn_wrap in self.conn_reg: conn_wrap.unwrap() def arm(self): output_buffer.append("ca-click") for i in self.conn_reg: i.arm() def __call__(self): output_buffer.append(self.__class__.__name__ + ".__call__") conn, addr = self.sock._really_accept() self.conn_reg.append(ExplodingConnectionWrap(conn)) return conn, addr class ExplodingConnectionWrap(object): # new connection's socket.makefile # eventlet *tends* to use socket.makefile, not raw socket methods. # need to patch file operations def __init__(self, conn): self.conn = conn self.conn._really_makefile = self.conn.makefile self.conn.makefile = self self.armed = False self.file_reg = [] def unwrap(self): self.conn.makefile = self.conn._really_makefile del self.conn._really_makefile def arm(self): output_buffer.append("tick") for i in self.file_reg: i.arm() def __call__(self, mode='r', bufsize=-1): output_buffer.append(self.__class__.__name__ + ".__call__") # file_obj = self.conn._really_makefile(*args, **kwargs) file_obj = ExplodingSocketFile(self.conn._sock, mode, bufsize) self.file_reg.append(file_obj) return file_obj class ExplodingSocketFile(eventlet.greenio._fileobject): def __init__(self, sock, mode='rb', bufsize=-1, close=False): args = [bufsize, close] if six.PY2 else [] super(self.__class__, self).__init__(sock, mode, *args) self.armed = False def arm(self): output_buffer.append("beep") self.armed = True def _fuse(self): if self.armed: output_buffer.append(TAG_BOOM) raise socket.timeout("timed out") def readline(self, *args, **kwargs): output_buffer.append(self.__class__.__name__ + ".readline") self._fuse() return super(self.__class__, self).readline(*args, **kwargs) def step(debug): output_buffer[:] = [] server_sock = eventlet.listen(('localhost', 0)) server_addr = server_sock.getsockname() sock_wrap = NaughtySocketAcceptWrap(server_sock) eventlet.spawn_n( eventlet.wsgi.server, debug=debug, log=BufferLog, max_size=128, site=tests.wsgi_test.Site(), sock=server_sock, ) try: # req #1 - normal sock1 = eventlet.connect(server_addr) sock1.settimeout(0.1) fd1 = sock1.makefile('rwb') fd1.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') fd1.flush() tests.wsgi_test.read_http(sock1) # let the server socket ops catch up, set bomb eventlet.sleep(0) output_buffer.append("arming...") sock_wrap.arm() # req #2 - old conn, post-arm - timeout fd1.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') fd1.flush() try: tests.wsgi_test.read_http(sock1) 
assert False, 'Expected ConnectionClosed exception' except tests.wsgi_test.ConnectionClosed: pass fd1.close() sock1.close() finally: # reset streams, then output trapped tracebacks sock_wrap.unwrap() # check output asserts in tests.wsgi_test.TestHttpd # test_143_server_connection_timeout_exception return output_buffer[:] def main(): output_normal = step(debug=False) output_debug = step(debug=True) assert "timed out" in output_debug[-1], repr(output_debug) # if the BOOM check fails, it's because our timeout didn't happen # (if eventlet stops using file.readline() to read HTTP headers, # for instance) assert TAG_BOOM == output_debug[-2], repr(output_debug) assert TAG_BOOM == output_normal[-1], repr(output_normal) assert "Traceback" not in output_debug, repr(output_debug) assert "Traceback" not in output_normal, repr(output_normal) print("pass") if __name__ == '__main__': main() eventlet-0.30.2/tests/manual/0000755000076500000240000000000014017673044016501 5ustar temotostaff00000000000000eventlet-0.30.2/tests/manual/__init__.py0000644000076500000240000000000014006212666020575 0ustar temotostaff00000000000000eventlet-0.30.2/tests/manual/greenio_memtest.py0000644000076500000240000000352014006212666022236 0ustar temotostaff00000000000000import eventlet from eventlet import greenio import os __test__ = False _proc_status = '/proc/%d/status' % os.getpid() _scale = {'kB': 1024.0, 'mB': 1024.0 * 1024.0, 'KB': 1024.0, 'MB': 1024.0 * 1024.0} def _VmB(VmKey): '''Private. ''' global _proc_status, _scale # get pseudo file /proc//status try: t = open(_proc_status) v = t.read() t.close() except: return 0.0 # non-Linux? # get VmKey line e.g. 'VmRSS: 9999 kB\n ...' i = v.index(VmKey) v = v[i:].split(None, 3) # whitespace if len(v) < 3: return 0.0 # invalid format? # convert Vm value to bytes return float(v[1]) * _scale[v[2]] def memory(since=0.0): '''Return memory usage in bytes. ''' return _VmB('VmSize:') - since def resident(since=0.0): '''Return resident memory usage in bytes. ''' return _VmB('VmRSS:') - since def stacksize(since=0.0): '''Return stack size in bytes. ''' return _VmB('VmStk:') - since def test_pipe_writes_large_messages(): r, w = os.pipe() r = greenio.GreenPipe(r) w = greenio.GreenPipe(w, 'w') large_message = b"".join([1024 * chr(i) for i in range(65)]) def writer(): w.write(large_message) w.close() gt = eventlet.spawn(writer) for i in range(65): buf = r.read(1024) expected = 1024 * chr(i) if buf != expected: print( "expected=%r..%r, found=%r..%r iter=%d" % (expected[:4], expected[-4:], buf[:4], buf[-4:], i)) gt.wait() if __name__ == "__main__": _iter = 1 while True: test_pipe_writes_large_messages() _iter += 1 if _iter % 10 == 0: print("_iter = %d, VmSize: %d, VmRSS = %d, VmStk = %d" % (_iter, memory(), resident(), stacksize())) eventlet-0.30.2/tests/manual/regress-226-unpatched-ssl.py0000644000076500000240000000012414006212666023576 0ustar temotostaff00000000000000import eventlet import requests requests.get('https://www.google.com/').status_code eventlet-0.30.2/tests/manual/websocket-gunicorn.py0000644000076500000240000000221714006212666022662 0ustar temotostaff00000000000000import eventlet.websocket import gunicorn import os import random import sys @eventlet.websocket.WebSocketWSGI def wsapp(ws): ws.send(b'test pass') ws.close() def app(environ, start_response): body = b'''

loading...

''' if environ.get('HTTP_UPGRADE') == 'websocket': return wsapp(environ, start_response) start_response( '200 OK', ( ('Content-type', 'text/html'), ('Content-Length', str(len(body))), ('X-Gunicorn-Version', gunicorn.__version__), ), ) return [body] if __name__ == '__main__': cmd = 'gunicorn websocket-gunicorn:app -b 127.0.0.1:5001 -k eventlet -w 1' sys.stderr.write('exec ' + cmd + '\n') os.system(cmd) eventlet-0.30.2/tests/mock.py0000644000076500000240000022560514006212666016536 0ustar temotostaff00000000000000# mock.py # Test tools for mocking and patching. # E-mail: fuzzyman AT voidspace DOT org DOT uk # # mock 1.0.1 # http://www.voidspace.org.uk/python/mock/ # # Copyright (c) 2007-2013, Michael Foord & the mock team # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
__all__ = ( 'Mock', 'MagicMock', 'patch', 'sentinel', 'DEFAULT', 'ANY', 'call', 'create_autospec', 'FILTER_DIR', 'NonCallableMock', 'NonCallableMagicMock', 'mock_open', 'PropertyMock', ) __version__ = '1.0.1' import pprint import sys try: import inspect except ImportError: # for alternative platforms that # may not have inspect inspect = None try: from functools import wraps as original_wraps except ImportError: # Python 2.4 compatibility def wraps(original): def inner(f): f.__name__ = original.__name__ f.__doc__ = original.__doc__ f.__module__ = original.__module__ wrapped = getattr(original, '__wrapped__', original) f.__wrapped__ = wrapped return f return inner else: if sys.version_info[:2] >= (3, 2): wraps = original_wraps else: def wraps(func): def inner(f): f = original_wraps(func)(f) wrapped = getattr(func, '__wrapped__', func) f.__wrapped__ = wrapped return f return inner try: unicode except NameError: # Python 3 basestring = unicode = str try: long except NameError: # Python 3 long = int try: BaseException except NameError: # Python 2.4 compatibility BaseException = Exception try: next except NameError: def next(obj): return obj.next() BaseExceptions = (BaseException,) if 'java' in sys.platform: # jython import java BaseExceptions = (BaseException, java.lang.Throwable) try: _isidentifier = str.isidentifier except AttributeError: # Python 2.X import keyword import re regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) def _isidentifier(string): if string in keyword.kwlist: return False return regex.match(string) inPy3k = sys.version_info[0] == 3 # Needed to work around Python 3 bug where use of "super" interferes with # defining __class__ as a descriptor _super = super self = 'im_self' builtin = '__builtin__' if inPy3k: self = '__self__' builtin = 'builtins' FILTER_DIR = True def _is_instance_mock(obj): # can't use isinstance on Mock objects because they override __class__ # The base class for all mocks is NonCallableMock return issubclass(type(obj), NonCallableMock) def _is_exception(obj): return ( isinstance(obj, BaseExceptions) or isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions) ) class _slotted(object): __slots__ = ['a'] DescriptorTypes = ( type(_slotted.a), property, ) def _getsignature(func, skipfirst, instance=False): if inspect is None: raise ImportError('inspect module not available') if isinstance(func, ClassTypes) and not instance: try: func = func.__init__ except AttributeError: return skipfirst = True elif not isinstance(func, FunctionTypes): # for classes where instance is True we end up here too try: func = func.__call__ except AttributeError: return if inPy3k: try: argspec = inspect.getfullargspec(func) except TypeError: # C function / method, possibly inherited object().__init__ return regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec else: try: regargs, varargs, varkwargs, defaults = inspect.getargspec(func) except TypeError: # C function / method, possibly inherited object().__init__ return # instance methods and classmethods need to lose the self argument if getattr(func, self, None) is not None: regargs = regargs[1:] if skipfirst: # this condition and the above one are never both True - why? 
regargs = regargs[1:] if inPy3k: signature = inspect.formatargspec( regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann, formatvalue=lambda value: "") else: signature = inspect.formatargspec( regargs, varargs, varkwargs, defaults, formatvalue=lambda value: "") return signature[1:-1], func def _check_signature(func, mock, skipfirst, instance=False): if not _callable(func): return result = _getsignature(func, skipfirst, instance) if result is None: return signature, func = result # can't use self because "self" is common as an argument name # unfortunately even not in the first place src = "lambda _mock_self, %s: None" % signature checksig = eval(src, {}) _copy_func_details(func, checksig) type(mock)._mock_check_sig = checksig def _copy_func_details(func, funcopy): funcopy.__name__ = func.__name__ funcopy.__doc__ = func.__doc__ # funcopy.__dict__.update(func.__dict__) funcopy.__module__ = func.__module__ if not inPy3k: funcopy.func_defaults = func.func_defaults return funcopy.__defaults__ = func.__defaults__ funcopy.__kwdefaults__ = func.__kwdefaults__ def _callable(obj): if isinstance(obj, ClassTypes): return True if getattr(obj, '__call__', None) is not None: return True return False def _is_list(obj): # checks for list or tuples # XXXX badly named! return type(obj) in (list, tuple) def _instance_callable(obj): """Given an object, return True if the object is callable. For classes, return True if instances would be callable.""" if not isinstance(obj, ClassTypes): # already an instance return getattr(obj, '__call__', None) is not None klass = obj # uses __bases__ instead of __mro__ so that we work with old style classes if klass.__dict__.get('__call__') is not None: return True for base in klass.__bases__: if _instance_callable(base): return True return False def _set_signature(mock, original, instance=False): # creates a function with signature (*args, **kwargs) that delegates to a # mock. It still does signature checking by calling a lambda with the same # signature as the original. if not _callable(original): return skipfirst = isinstance(original, ClassTypes) result = _getsignature(original, skipfirst, instance) if result is None: # was a C function (e.g. 
object().__init__ ) that can't be mocked return signature, func = result src = "lambda %s: None" % signature checksig = eval(src, {}) _copy_func_details(func, checksig) name = original.__name__ if not _isidentifier(name): name = 'funcopy' context = {'_checksig_': checksig, 'mock': mock} src = """def %s(*args, **kwargs): _checksig_(*args, **kwargs) return mock(*args, **kwargs)""" % name exec(src, context) funcopy = context[name] _setup_func(funcopy, mock) return funcopy def _setup_func(funcopy, mock): funcopy.mock = mock # can't use isinstance with mocks if not _is_instance_mock(mock): return def assert_called_with(*args, **kwargs): return mock.assert_called_with(*args, **kwargs) def assert_called_once_with(*args, **kwargs): return mock.assert_called_once_with(*args, **kwargs) def assert_has_calls(*args, **kwargs): return mock.assert_has_calls(*args, **kwargs) def assert_any_call(*args, **kwargs): return mock.assert_any_call(*args, **kwargs) def reset_mock(): funcopy.method_calls = _CallList() funcopy.mock_calls = _CallList() mock.reset_mock() ret = funcopy.return_value if _is_instance_mock(ret) and ret is not mock: ret.reset_mock() funcopy.called = False funcopy.call_count = 0 funcopy.call_args = None funcopy.call_args_list = _CallList() funcopy.method_calls = _CallList() funcopy.mock_calls = _CallList() funcopy.return_value = mock.return_value funcopy.side_effect = mock.side_effect funcopy._mock_children = mock._mock_children funcopy.assert_called_with = assert_called_with funcopy.assert_called_once_with = assert_called_once_with funcopy.assert_has_calls = assert_has_calls funcopy.assert_any_call = assert_any_call funcopy.reset_mock = reset_mock mock._mock_delegate = funcopy def _is_magic(name): return '__%s__' % name[2:-2] == name class _SentinelObject(object): "A unique, named, sentinel object." 
def __init__(self, name): self.name = name def __repr__(self): return 'sentinel.%s' % self.name class _Sentinel(object): """Access attributes to return a named object, usable as a sentinel.""" def __init__(self): self._sentinels = {} def __getattr__(self, name): if name == '__bases__': # Without this help(mock) raises an exception raise AttributeError return self._sentinels.setdefault(name, _SentinelObject(name)) sentinel = _Sentinel() DEFAULT = sentinel.DEFAULT _missing = sentinel.MISSING _deleted = sentinel.DELETED class OldStyleClass: pass ClassType = type(OldStyleClass) def _copy(value): if type(value) in (dict, list, tuple, set): return type(value)(value) return value ClassTypes = (type,) if not inPy3k: ClassTypes = (type, ClassType) _allowed_names = set( [ 'return_value', '_mock_return_value', 'side_effect', '_mock_side_effect', '_mock_parent', '_mock_new_parent', '_mock_name', '_mock_new_name' ] ) def _delegating_property(name): _allowed_names.add(name) _the_name = '_mock_' + name def _get(self, name=name, _the_name=_the_name): sig = self._mock_delegate if sig is None: return getattr(self, _the_name) return getattr(sig, name) def _set(self, value, name=name, _the_name=_the_name): sig = self._mock_delegate if sig is None: self.__dict__[_the_name] = value else: setattr(sig, name, value) return property(_get, _set) class _CallList(list): def __contains__(self, value): if not isinstance(value, list): return list.__contains__(self, value) len_value = len(value) len_self = len(self) if len_value > len_self: return False for i in range(0, len_self - len_value + 1): sub_list = self[i:i + len_value] if sub_list == value: return True return False def __repr__(self): return pprint.pformat(list(self)) def _check_and_set_parent(parent, value, name, new_name): if not _is_instance_mock(value): return False if ((value._mock_name or value._mock_new_name) or (value._mock_parent is not None) or (value._mock_new_parent is not None)): return False _parent = parent while _parent is not None: # setting a mock (value) as a child or return value of itself # should not modify the mock if _parent is value: return False _parent = _parent._mock_new_parent if new_name: value._mock_new_parent = parent value._mock_new_name = new_name if name: value._mock_parent = parent value._mock_name = name return True class Base(object): _mock_return_value = DEFAULT _mock_side_effect = None def __init__(self, *args, **kwargs): pass class NonCallableMock(Base): """A non-callable version of `Mock`""" def __new__(cls, *args, **kw): # every instance has its own class # so we can create magic methods on the # class without stomping on other mocks new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__}) instance = object.__new__(new) return instance def __init__( self, spec=None, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs ): if _new_parent is None: _new_parent = parent __dict__ = self.__dict__ __dict__['_mock_parent'] = parent __dict__['_mock_name'] = name __dict__['_mock_new_name'] = _new_name __dict__['_mock_new_parent'] = _new_parent if spec_set is not None: spec = spec_set spec_set = True self._mock_add_spec(spec, spec_set) __dict__['_mock_children'] = {} __dict__['_mock_wraps'] = wraps __dict__['_mock_delegate'] = None __dict__['_mock_called'] = False __dict__['_mock_call_args'] = None __dict__['_mock_call_count'] = 0 __dict__['_mock_call_args_list'] = _CallList() __dict__['_mock_mock_calls'] = _CallList() __dict__['method_calls'] = _CallList() if kwargs: 
self.configure_mock(**kwargs) _super(NonCallableMock, self).__init__( spec, wraps, name, spec_set, parent, _spec_state ) def attach_mock(self, mock, attribute): """ Attach a mock as an attribute of this one, replacing its name and parent. Calls to the attached mock will be recorded in the `method_calls` and `mock_calls` attributes of this one.""" mock._mock_parent = None mock._mock_new_parent = None mock._mock_name = '' mock._mock_new_name = None setattr(self, attribute, mock) def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) def _mock_add_spec(self, spec, spec_set): _spec_class = None if spec is not None and not _is_list(spec): if isinstance(spec, ClassTypes): _spec_class = spec else: _spec_class = _get_class(spec) spec = dir(spec) __dict__ = self.__dict__ __dict__['_spec_class'] = _spec_class __dict__['_spec_set'] = spec_set __dict__['_mock_methods'] = spec def __get_return_value(self): ret = self._mock_return_value if self._mock_delegate is not None: ret = self._mock_delegate.return_value if ret is DEFAULT: ret = self._get_child_mock( _new_parent=self, _new_name='()' ) self.return_value = ret return ret def __set_return_value(self, value): if self._mock_delegate is not None: self._mock_delegate.return_value = value else: self._mock_return_value = value _check_and_set_parent(self, value, None, '()') __return_value_doc = "The value to be returned when the mock is called." return_value = property(__get_return_value, __set_return_value, __return_value_doc) @property def __class__(self): if self._spec_class is None: return type(self) return self._spec_class called = _delegating_property('called') call_count = _delegating_property('call_count') call_args = _delegating_property('call_args') call_args_list = _delegating_property('call_args_list') mock_calls = _delegating_property('mock_calls') def __get_side_effect(self): sig = self._mock_delegate if sig is None: return self._mock_side_effect return sig.side_effect def __set_side_effect(self, value): value = _try_iter(value) sig = self._mock_delegate if sig is None: self._mock_side_effect = value else: sig.side_effect = value side_effect = property(__get_side_effect, __set_side_effect) def reset_mock(self): "Restore the mock object to its initial state." self.called = False self.call_args = None self.call_count = 0 self.mock_calls = _CallList() self.call_args_list = _CallList() self.method_calls = _CallList() for child in self._mock_children.values(): if isinstance(child, _SpecState): continue child.reset_mock() ret = self._mock_return_value if _is_instance_mock(ret) and ret is not self: ret.reset_mock() def configure_mock(self, **kwargs): """Set attributes on the mock through keyword arguments. 
Attributes plus return values and side effects can be set on child mocks using standard dot notation and unpacking a dictionary in the method call: >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} >>> mock.configure_mock(**attrs)""" for arg, val in sorted(kwargs.items(), # we sort on the number of dots so that # attributes are set before we set attributes on # attributes key=lambda entry: entry[0].count('.')): args = arg.split('.') final = args.pop() obj = self for entry in args: obj = getattr(obj, entry) setattr(obj, final, val) def __getattr__(self, name): if name == '_mock_methods': raise AttributeError(name) elif self._mock_methods is not None: if name not in self._mock_methods or name in _all_magics: raise AttributeError("Mock object has no attribute %r" % name) elif _is_magic(name): raise AttributeError(name) result = self._mock_children.get(name) if result is _deleted: raise AttributeError(name) elif result is None: wraps = None if self._mock_wraps is not None: # XXXX should we get the attribute without triggering code # execution? wraps = getattr(self._mock_wraps, name) result = self._get_child_mock( parent=self, name=name, wraps=wraps, _new_name=name, _new_parent=self ) self._mock_children[name] = result elif isinstance(result, _SpecState): result = create_autospec( result.spec, result.spec_set, result.instance, result.parent, result.name ) self._mock_children[name] = result return result def __repr__(self): _name_list = [self._mock_new_name] _parent = self._mock_new_parent last = self dot = '.' if _name_list == ['()']: dot = '' seen = set() while _parent is not None: last = _parent _name_list.append(_parent._mock_new_name + dot) dot = '.' if _parent._mock_new_name == '()': dot = '' _parent = _parent._mock_new_parent # use ids here so as not to call __hash__ on the mocks if id(_parent) in seen: break seen.add(id(_parent)) _name_list = list(reversed(_name_list)) _first = last._mock_name or 'mock' if len(_name_list) > 1: if _name_list[1] not in ('()', '().'): _first += '.' _name_list[0] = _first name = ''.join(_name_list) name_string = '' if name not in ('mock', 'mock.'): name_string = ' name=%r' % name spec_string = '' if self._spec_class is not None: spec_string = ' spec=%r' if self._spec_set: spec_string = ' spec_set=%r' spec_string = spec_string % self._spec_class.__name__ return "<%s%s%s id='%s'>" % ( type(self).__name__, name_string, spec_string, id(self) ) def __dir__(self): """Filter the output of `dir(mock)` to only useful members.""" extras = self._mock_methods or [] from_type = dir(type(self)) from_dict = list(self.__dict__) if FILTER_DIR: from_type = [e for e in from_type if not e.startswith('_')] from_dict = [e for e in from_dict if not e.startswith('_') or _is_magic(e)] return sorted(set(extras + from_type + from_dict + list(self._mock_children))) def __setattr__(self, name, value): if name in _allowed_names: # property setters go through here return object.__setattr__(self, name, value) elif (self._spec_set and self._mock_methods is not None and name not in self._mock_methods and name not in self.__dict__): raise AttributeError("Mock object has no attribute '%s'" % name) elif name in _unsupported_magics: msg = 'Attempting to set unsupported magic method %r.' 
% name raise AttributeError(msg) elif name in _all_magics: if self._mock_methods is not None and name not in self._mock_methods: raise AttributeError("Mock object has no attribute '%s'" % name) if not _is_instance_mock(value): setattr(type(self), name, _get_method(name, value)) original = value value = lambda *args, **kw: original(self, *args, **kw) else: # only set _new_name and not name so that mock_calls is tracked # but not method calls _check_and_set_parent(self, value, None, name) setattr(type(self), name, value) self._mock_children[name] = value elif name == '__class__': self._spec_class = value return else: if _check_and_set_parent(self, value, name, name): self._mock_children[name] = value return object.__setattr__(self, name, value) def __delattr__(self, name): if name in _all_magics and name in type(self).__dict__: delattr(type(self), name) if name not in self.__dict__: # for magic methods that are still MagicProxy objects and # not set on the instance itself return if name in self.__dict__: object.__delattr__(self, name) obj = self._mock_children.get(name, _missing) if obj is _deleted: raise AttributeError(name) if obj is not _missing: del self._mock_children[name] self._mock_children[name] = _deleted def _format_mock_call_signature(self, args, kwargs): name = self._mock_name or 'mock' return _format_call_signature(name, args, kwargs) def _format_mock_failure_message(self, args, kwargs): message = 'Expected call: %s\nActual call: %s' expected_string = self._format_mock_call_signature(args, kwargs) call_args = self.call_args if len(call_args) == 3: call_args = call_args[1:] actual_string = self._format_mock_call_signature(*call_args) return message % (expected_string, actual_string) def assert_called_with(_mock_self, *args, **kwargs): """assert that the mock was called with the specified arguments. Raises an AssertionError if the args and keyword args passed in are different to the last call to the mock.""" self = _mock_self if self.call_args is None: expected = self._format_mock_call_signature(args, kwargs) raise AssertionError('Expected call: %s\nNot called' % (expected,)) if self.call_args != (args, kwargs): msg = self._format_mock_failure_message(args, kwargs) raise AssertionError(msg) def assert_called_once_with(_mock_self, *args, **kwargs): """assert that the mock was called exactly once and with the specified arguments.""" self = _mock_self if not self.call_count == 1: msg = ("Expected to be called once. Called %s times." % self.call_count) raise AssertionError(msg) return self.assert_called_with(*args, **kwargs) def assert_has_calls(self, calls, any_order=False): """assert the mock has been called with the specified calls. The `mock_calls` list is checked for the calls. If `any_order` is False (the default) then the calls must be sequential. There can be extra calls before or after the specified calls. If `any_order` is True then the calls can be in any order, but they must all appear in `mock_calls`.""" if not any_order: if calls not in self.mock_calls: raise AssertionError( 'Calls not found.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) return all_calls = list(self.mock_calls) not_found = [] for kall in calls: try: all_calls.remove(kall) except ValueError: not_found.append(kall) if not_found: raise AssertionError( '%r not all found in call list' % (tuple(not_found),) ) def assert_any_call(self, *args, **kwargs): """assert the mock has been called with the specified arguments. 
The assert passes if the mock has *ever* been called, unlike `assert_called_with` and `assert_called_once_with` that only pass if the call is the most recent one.""" kall = call(*args, **kwargs) if kall not in self.call_args_list: expected_string = self._format_mock_call_signature(args, kwargs) raise AssertionError( '%s call not found' % expected_string ) def _get_child_mock(self, **kw): """Create the child mocks for attributes and return value. By default child mocks will be the same type as the parent. Subclasses of Mock may want to override this to customize the way child mocks are made. For non-callable mocks the callable variant will be used (rather than any custom subclass).""" _type = type(self) if not issubclass(_type, CallableMixin): if issubclass(_type, NonCallableMagicMock): klass = MagicMock elif issubclass(_type, NonCallableMock): klass = Mock else: klass = _type.__mro__[1] return klass(**kw) def _try_iter(obj): if obj is None: return obj if _is_exception(obj): return obj if _callable(obj): return obj try: return iter(obj) except TypeError: # XXXX backwards compatibility # but this will blow up on first call - so maybe we should fail early? return obj class CallableMixin(Base): def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs): self.__dict__['_mock_return_value'] = return_value _super(CallableMixin, self).__init__( spec, wraps, name, spec_set, parent, _spec_state, _new_name, _new_parent, **kwargs ) self.side_effect = side_effect def _mock_check_sig(self, *args, **kwargs): # stub method that can be replaced with one with a specific signature pass def __call__(_mock_self, *args, **kwargs): # can't use self in-case a function / method we are mocking uses self # in the signature _mock_self._mock_check_sig(*args, **kwargs) return _mock_self._mock_call(*args, **kwargs) def _mock_call(_mock_self, *args, **kwargs): self = _mock_self self.called = True self.call_count += 1 self.call_args = _Call((args, kwargs), two=True) self.call_args_list.append(_Call((args, kwargs), two=True)) _new_name = self._mock_new_name _new_parent = self._mock_new_parent self.mock_calls.append(_Call(('', args, kwargs))) seen = set() skip_next_dot = _new_name == '()' do_method_calls = self._mock_parent is not None name = self._mock_name while _new_parent is not None: this_mock_call = _Call((_new_name, args, kwargs)) if _new_parent._mock_new_name: dot = '.' if skip_next_dot: dot = '' skip_next_dot = False if _new_parent._mock_new_name == '()': skip_next_dot = True _new_name = _new_parent._mock_new_name + dot + _new_name if do_method_calls: if _new_name == name: this_method_call = this_mock_call else: this_method_call = _Call((name, args, kwargs)) _new_parent.method_calls.append(this_method_call) do_method_calls = _new_parent._mock_parent is not None if do_method_calls: name = _new_parent._mock_name + '.' 
+ name _new_parent.mock_calls.append(this_mock_call) _new_parent = _new_parent._mock_new_parent # use ids here so as not to call __hash__ on the mocks _new_parent_id = id(_new_parent) if _new_parent_id in seen: break seen.add(_new_parent_id) ret_val = DEFAULT effect = self.side_effect if effect is not None: if _is_exception(effect): raise effect if not _callable(effect): result = next(effect) if _is_exception(result): raise result return result ret_val = effect(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value if (self._mock_wraps is not None and self._mock_return_value is DEFAULT): return self._mock_wraps(*args, **kwargs) if ret_val is DEFAULT: ret_val = self.return_value return ret_val class Mock(CallableMixin, NonCallableMock): """ Create a new `Mock` object. `Mock` takes several optional arguments that specify the behaviour of the Mock object: * `spec`: This can be either a list of strings or an existing object (a class or instance) that acts as the specification for the mock object. If you pass in an object then a list of strings is formed by calling dir on the object (excluding unsupported magic attributes and methods). Accessing any attribute not in this list will raise an `AttributeError`. If `spec` is an object (rather than a list of strings) then `mock.__class__` returns the class of the spec object. This allows mocks to pass `isinstance` tests. * `spec_set`: A stricter variant of `spec`. If used, attempting to *set* or get an attribute on the mock that isn't on the object passed as `spec_set` will raise an `AttributeError`. * `side_effect`: A function to be called whenever the Mock is called. See the `side_effect` attribute. Useful for raising exceptions or dynamically changing return values. The function is called with the same arguments as the mock, and unless it returns `DEFAULT`, the return value of this function is used as the return value. Alternatively `side_effect` can be an exception class or instance. In this case the exception will be raised when the mock is called. If `side_effect` is an iterable then each call to the mock will return the next value from the iterable. If any of the members of the iterable are exceptions they will be raised instead of returned. * `return_value`: The value returned when the mock is called. By default this is a new Mock (created on first access). See the `return_value` attribute. * `wraps`: Item for the mock object to wrap. If `wraps` is not None then calling the Mock will pass the call through to the wrapped object (returning the real result). Attribute access on the mock will return a Mock object that wraps the corresponding attribute of the wrapped object (so attempting to access an attribute that doesn't exist will raise an `AttributeError`). If the mock has an explicit `return_value` set then calls are not passed to the wrapped object and the `return_value` is returned instead. * `name`: If the mock has a name then it will be used in the repr of the mock. This can be useful for debugging. The name is propagated to child mocks. Mocks can also be called with arbitrary keyword arguments. These will be used to set attributes on the mock after it is created. 
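    A brief illustrative sketch (the attribute and argument names below are
    arbitrary, not taken from any real API)::

        >>> m = Mock(name='db', **{'connect.return_value': 'ok'})
        >>> m.connect('localhost')
        'ok'
        >>> m.connect.assert_called_with('localhost')

    Dotted keyword arguments configure child mocks, exactly as
    `configure_mock` does.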
""" def _dot_lookup(thing, comp, import_path): try: return getattr(thing, comp) except AttributeError: __import__(import_path) return getattr(thing, comp) def _importer(target): components = target.split('.') import_path = components.pop(0) thing = __import__(import_path) for comp in components: import_path += ".%s" % comp thing = _dot_lookup(thing, comp, import_path) return thing def _is_started(patcher): # XXXX horrible return hasattr(patcher, 'is_local') class _patch(object): attribute_name = None _active_patches = set() def __init__( self, getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ): if new_callable is not None: if new is not DEFAULT: raise ValueError( "Cannot use 'new' and 'new_callable' together" ) if autospec is not None: raise ValueError( "Cannot use 'autospec' and 'new_callable' together" ) self.getter = getter self.attribute = attribute self.new = new self.new_callable = new_callable self.spec = spec self.create = create self.has_local = False self.spec_set = spec_set self.autospec = autospec self.kwargs = kwargs self.additional_patchers = [] def copy(self): patcher = _patch( self.getter, self.attribute, self.new, self.spec, self.create, self.spec_set, self.autospec, self.new_callable, self.kwargs ) patcher.attribute_name = self.attribute_name patcher.additional_patchers = [ p.copy() for p in self.additional_patchers ] return patcher def __call__(self, func): if isinstance(func, ClassTypes): return self.decorate_class(func) return self.decorate_callable(func) def decorate_class(self, klass): for attr in dir(klass): if not attr.startswith(patch.TEST_PREFIX): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue patcher = self.copy() setattr(klass, attr, patcher(attr_value)) return klass def decorate_callable(self, func): if hasattr(func, 'patchings'): func.patchings.append(self) return func @wraps(func) def patched(*args, **keywargs): # don't use a with here (backwards compatibility with Python 2.4) extra_args = [] entered_patchers = [] # can't use try...except...finally because of Python 2.4 # compatibility exc_info = tuple() try: try: for patching in patched.patchings: arg = patching.__enter__() entered_patchers.append(patching) if patching.attribute_name is not None: keywargs.update(arg) elif patching.new is DEFAULT: extra_args.append(arg) args += tuple(extra_args) return func(*args, **keywargs) except: if (patching not in entered_patchers and _is_started(patching)): # the patcher may have been started, but an exception # raised whilst entering one of its additional_patchers entered_patchers.append(patching) # Pass the exception to __exit__ exc_info = sys.exc_info() # re-raise the exception raise finally: for patching in reversed(entered_patchers): patching.__exit__(*exc_info) patched.patchings = [self] if hasattr(func, 'func_code'): # not in Python 3 patched.compat_co_firstlineno = getattr( func, "compat_co_firstlineno", func.func_code.co_firstlineno ) return patched def get_original(self): target = self.getter() name = self.attribute original = DEFAULT local = False try: original = target.__dict__[name] except (AttributeError, KeyError): original = getattr(target, name, DEFAULT) else: local = True if not self.create and original is DEFAULT: raise AttributeError( "%s does not have the attribute %r" % (target, name) ) return original, local def __enter__(self): """Perform the patch.""" new, spec, spec_set = self.new, self.spec, self.spec_set autospec, kwargs = self.autospec, self.kwargs new_callable = 
self.new_callable self.target = self.getter() # normalise False to None if spec is False: spec = None if spec_set is False: spec_set = None if autospec is False: autospec = None if spec is not None and autospec is not None: raise TypeError("Can't specify spec and autospec") if ((spec is not None or autospec is not None) and spec_set not in (True, None)): raise TypeError("Can't provide explicit spec_set *and* spec or autospec") original, local = self.get_original() if new is DEFAULT and autospec is None: inherit = False if spec is True: # set spec to the object we are replacing spec = original if spec_set is True: spec_set = original spec = None elif spec is not None: if spec_set is True: spec_set = spec spec = None elif spec_set is True: spec_set = original if spec is not None or spec_set is not None: if original is DEFAULT: raise TypeError("Can't use 'spec' with create=True") if isinstance(original, ClassTypes): # If we're patching out a class and there is a spec inherit = True Klass = MagicMock _kwargs = {} if new_callable is not None: Klass = new_callable elif spec is not None or spec_set is not None: this_spec = spec if spec_set is not None: this_spec = spec_set if _is_list(this_spec): not_callable = '__call__' not in this_spec else: not_callable = not _callable(this_spec) if not_callable: Klass = NonCallableMagicMock if spec is not None: _kwargs['spec'] = spec if spec_set is not None: _kwargs['spec_set'] = spec_set # add a name to mocks if (isinstance(Klass, type) and issubclass(Klass, NonCallableMock) and self.attribute): _kwargs['name'] = self.attribute _kwargs.update(kwargs) new = Klass(**_kwargs) if inherit and _is_instance_mock(new): # we can only tell if the instance should be callable if the # spec is not a list this_spec = spec if spec_set is not None: this_spec = spec_set if (not _is_list(this_spec) and not _instance_callable(this_spec)): Klass = NonCallableMagicMock _kwargs.pop('name') new.return_value = Klass(_new_parent=new, _new_name='()', **_kwargs) elif autospec is not None: # spec is ignored, new *must* be default, spec_set is treated # as a boolean. Should we check spec is not None and that spec_set # is a bool? if new is not DEFAULT: raise TypeError( "autospec creates the mock for you. Can't specify " "autospec and new." 
) if original is DEFAULT: raise TypeError("Can't use 'autospec' with create=True") spec_set = bool(spec_set) if autospec is True: autospec = original new = create_autospec(autospec, spec_set=spec_set, _name=self.attribute, **kwargs) elif kwargs: # can't set keyword args when we aren't creating the mock # XXXX If new is a Mock we could call new.configure_mock(**kwargs) raise TypeError("Can't pass kwargs to a mock we aren't creating") new_attr = new self.temp_original = original self.is_local = local setattr(self.target, self.attribute, new_attr) if self.attribute_name is not None: extra_args = {} if self.new is DEFAULT: extra_args[self.attribute_name] = new for patching in self.additional_patchers: arg = patching.__enter__() if patching.new is DEFAULT: extra_args.update(arg) return extra_args return new def __exit__(self, *exc_info): """Undo the patch.""" if not _is_started(self): raise RuntimeError('stop called on unstarted patcher') if self.is_local and self.temp_original is not DEFAULT: setattr(self.target, self.attribute, self.temp_original) else: delattr(self.target, self.attribute) if not self.create and not hasattr(self.target, self.attribute): # needed for proxy objects like django settings setattr(self.target, self.attribute, self.temp_original) del self.temp_original del self.is_local del self.target for patcher in reversed(self.additional_patchers): if _is_started(patcher): patcher.__exit__(*exc_info) def start(self): """Activate a patch, returning any created mock.""" result = self.__enter__() self._active_patches.add(self) return result def stop(self): """Stop an active patch.""" self._active_patches.discard(self) return self.__exit__() def _get_target(target): try: target, attribute = target.rsplit('.', 1) except (TypeError, ValueError): raise TypeError("Need a valid target to patch. You supplied: %r" % (target,)) getter = lambda: _importer(target) return getter, attribute def _patch_object( target, attribute, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs ): """ patch.object(target, attribute, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs) patch the named member (`attribute`) on an object (`target`) with a mock object. `patch.object` can be used as a decorator, class decorator or a context manager. Arguments `new`, `spec`, `create`, `spec_set`, `autospec` and `new_callable` have the same meaning as for `patch`. Like `patch`, `patch.object` takes arbitrary keyword arguments for configuring the mock object it creates. When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ getter = lambda: target return _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ) def _patch_multiple(target, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs): """Perform multiple patches in a single call. It takes the object to be patched (either as an object or a string to fetch the object by importing) and keyword arguments for the patches:: with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): ... Use `DEFAULT` as the value if you want `patch.multiple` to create mocks for you. In this case the created mocks are passed into a decorated function by keyword, and a dictionary is returned when `patch.multiple` is used as a context manager. `patch.multiple` can be used as a decorator, class decorator or a context manager. 
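    As a decorator, using `DEFAULT` so that the mocks are created for you
    (the target string below is only a placeholder)::

        @patch.multiple('package.module', FIRST_THING=DEFAULT, SECOND_THING=DEFAULT)
        def test_something(FIRST_THING, SECOND_THING):
            # each argument is the MagicMock that replaced the attribute
            ...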
The arguments `spec`, `spec_set`, `create`, `autospec` and `new_callable` have the same meaning as for `patch`. These arguments will be applied to *all* patches done by `patch.multiple`. When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ if type(target) in (unicode, str): getter = lambda: _importer(target) else: getter = lambda: target if not kwargs: raise ValueError( 'Must supply at least one keyword argument with patch.multiple' ) # need to wrap in a list for python 3, where items is a view items = list(kwargs.items()) attribute, new = items[0] patcher = _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, {} ) patcher.attribute_name = attribute for attribute, new in items[1:]: this_patcher = _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, {} ) this_patcher.attribute_name = attribute patcher.additional_patchers.append(this_patcher) return patcher def patch( target, new=DEFAULT, spec=None, create=False, spec_set=None, autospec=None, new_callable=None, **kwargs ): """ `patch` acts as a function decorator, class decorator or a context manager. Inside the body of the function or with statement, the `target` is patched with a `new` object. When the function/with statement exits the patch is undone. If `new` is omitted, then the target is replaced with a `MagicMock`. If `patch` is used as a decorator and `new` is omitted, the created mock is passed in as an extra argument to the decorated function. If `patch` is used as a context manager the created mock is returned by the context manager. `target` should be a string in the form `'package.module.ClassName'`. The `target` is imported and the specified object replaced with the `new` object, so the `target` must be importable from the environment you are calling `patch` from. The target is imported when the decorated function is executed, not at decoration time. The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` if patch is creating one for you. In addition you can pass `spec=True` or `spec_set=True`, which causes patch to pass in the object being mocked as the spec/spec_set object. `new_callable` allows you to specify a different class, or callable object, that will be called to create the `new` object. By default `MagicMock` is used. A more powerful form of `spec` is `autospec`. If you set `autospec=True` then the mock with be created with a spec from the object being replaced. All attributes of the mock will also have the spec of the corresponding attribute of the object being replaced. Methods and functions being mocked will have their arguments checked and will raise a `TypeError` if they are called with the wrong signature. For mocks replacing a class, their return value (the 'instance') will have the same spec as the class. Instead of `autospec=True` you can pass `autospec=some_object` to use an arbitrary object as the spec instead of the one being replaced. By default `patch` will fail to replace attributes that don't exist. If you pass in `create=True`, and the attribute doesn't exist, patch will create the attribute for you when the patched function is called, and delete it again afterwards. This is useful for writing tests against attributes that your production code creates at runtime. It is off by by default because it can be dangerous. With it switched on you can write passing tests against APIs that don't actually exist! Patch can be used as a `TestCase` class decorator. 
It works by decorating each test method in the class. This reduces the boilerplate code when your test methods share a common patchings set. `patch` finds tests by looking for method names that start with `patch.TEST_PREFIX`. By default this is `test`, which matches the way `unittest` finds tests. You can specify an alternative prefix by setting `patch.TEST_PREFIX`. Patch can be used as a context manager, with the with statement. Here the patching applies to the indented block after the with statement. If you use "as" then the patched object will be bound to the name after the "as"; very useful if `patch` is creating a mock object for you. `patch` takes arbitrary keyword arguments. These will be passed to the `Mock` (or `new_callable`) on construction. `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are available for alternate use-cases. """ getter, attribute = _get_target(target) return _patch( getter, attribute, new, spec, create, spec_set, autospec, new_callable, kwargs ) class _patch_dict(object): """ Patch a dictionary, or dictionary like object, and restore the dictionary to its original state after the test. `in_dict` can be a dictionary or a mapping like container. If it is a mapping then it must at least support getting, setting and deleting items plus iterating over keys. `in_dict` can also be a string specifying the name of the dictionary, which will then be fetched by importing it. `values` can be a dictionary of values to set in the dictionary. `values` can also be an iterable of `(key, value)` pairs. If `clear` is True then the dictionary will be cleared before the new values are set. `patch.dict` can also be called with arbitrary keyword arguments to set values in the dictionary:: with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): ... `patch.dict` can be used as a context manager, decorator or class decorator. When used as a class decorator `patch.dict` honours `patch.TEST_PREFIX` for choosing which methods to wrap. """ def __init__(self, in_dict, values=(), clear=False, **kwargs): if isinstance(in_dict, basestring): in_dict = _importer(in_dict) self.in_dict = in_dict # support any argument supported by dict(...) 
constructor self.values = dict(values) self.values.update(kwargs) self.clear = clear self._original = None def __call__(self, f): if isinstance(f, ClassTypes): return self.decorate_class(f) @wraps(f) def _inner(*args, **kw): self._patch_dict() try: return f(*args, **kw) finally: self._unpatch_dict() return _inner def decorate_class(self, klass): for attr in dir(klass): attr_value = getattr(klass, attr) if (attr.startswith(patch.TEST_PREFIX) and hasattr(attr_value, "__call__")): decorator = _patch_dict(self.in_dict, self.values, self.clear) decorated = decorator(attr_value) setattr(klass, attr, decorated) return klass def __enter__(self): """Patch the dict.""" self._patch_dict() def _patch_dict(self): values = self.values in_dict = self.in_dict clear = self.clear try: original = in_dict.copy() except AttributeError: # dict like object with no copy method # must support iteration over keys original = {} for key in in_dict: original[key] = in_dict[key] self._original = original if clear: _clear_dict(in_dict) try: in_dict.update(values) except AttributeError: # dict like object with no update method for key in values: in_dict[key] = values[key] def _unpatch_dict(self): in_dict = self.in_dict original = self._original _clear_dict(in_dict) try: in_dict.update(original) except AttributeError: for key in original: in_dict[key] = original[key] def __exit__(self, *args): """Unpatch the dict.""" self._unpatch_dict() return False start = __enter__ stop = __exit__ def _clear_dict(in_dict): try: in_dict.clear() except AttributeError: keys = list(in_dict) for key in keys: del in_dict[key] def _patch_stopall(): """Stop all active patches.""" for patch in list(_patch._active_patches): patch.stop() patch.object = _patch_object patch.dict = _patch_dict patch.multiple = _patch_multiple patch.stopall = _patch_stopall patch.TEST_PREFIX = 'test' magic_methods = ( "lt le gt ge eq ne " "getitem setitem delitem " "len contains iter " "hash str sizeof " "enter exit " "divmod neg pos abs invert " "complex int float index " "trunc floor ceil " ) numerics = "add sub mul div floordiv mod lshift rshift and xor or pow " inplace = ' '.join('i%s' % n for n in numerics.split()) right = ' '.join('r%s' % n for n in numerics.split()) extra = '' if inPy3k: extra = 'bool next ' else: extra = 'unicode long nonzero oct hex truediv rtruediv ' # not including __prepare__, __instancecheck__, __subclasscheck__ # (as they are metaclass methods) # __del__ is not supported at all as it causes problems if it exists _non_defaults = set('__%s__' % method for method in [ 'cmp', 'getslice', 'setslice', 'coerce', 'subclasses', 'format', 'get', 'set', 'delete', 'reversed', 'missing', 'reduce', 'reduce_ex', 'getinitargs', 'getnewargs', 'getstate', 'setstate', 'getformat', 'setformat', 'repr', 'dir' ]) def _get_method(name, func): "Turns a callable object (like a mock) into a real function" def method(self, *args, **kw): return func(self, *args, **kw) method.__name__ = name return method _magics = set( '__%s__' % method for method in ' '.join([magic_methods, numerics, inplace, right, extra]).split() ) _all_magics = _magics | _non_defaults _unsupported_magics = set([ '__getattr__', '__setattr__', '__init__', '__new__', '__prepare__' '__instancecheck__', '__subclasscheck__', '__del__' ]) _calculate_return_value = { '__hash__': lambda self: object.__hash__(self), '__str__': lambda self: object.__str__(self), '__sizeof__': lambda self: object.__sizeof__(self), '__unicode__': lambda self: unicode(object.__str__(self)), } _return_values = { '__lt__': 
NotImplemented, '__gt__': NotImplemented, '__le__': NotImplemented, '__ge__': NotImplemented, '__int__': 1, '__contains__': False, '__len__': 0, '__exit__': False, '__complex__': 1j, '__float__': 1.0, '__bool__': True, '__nonzero__': True, '__oct__': '1', '__hex__': '0x1', '__long__': long(1), '__index__': 1, } def _get_eq(self): def __eq__(other): ret_val = self.__eq__._mock_return_value if ret_val is not DEFAULT: return ret_val return self is other return __eq__ def _get_ne(self): def __ne__(other): if self.__ne__._mock_return_value is not DEFAULT: return DEFAULT return self is not other return __ne__ def _get_iter(self): def __iter__(): ret_val = self.__iter__._mock_return_value if ret_val is DEFAULT: return iter([]) # if ret_val was already an iterator, then calling iter on it should # return the iterator unchanged return iter(ret_val) return __iter__ _side_effect_methods = { '__eq__': _get_eq, '__ne__': _get_ne, '__iter__': _get_iter, } def _set_return_value(mock, method, name): fixed = _return_values.get(name, DEFAULT) if fixed is not DEFAULT: method.return_value = fixed return return_calulator = _calculate_return_value.get(name) if return_calulator is not None: try: return_value = return_calulator(mock) except AttributeError: # XXXX why do we return AttributeError here? # set it as a side_effect instead? return_value = AttributeError(name) method.return_value = return_value return side_effector = _side_effect_methods.get(name) if side_effector is not None: method.side_effect = side_effector(mock) class MagicMixin(object): def __init__(self, *args, **kw): _super(MagicMixin, self).__init__(*args, **kw) self._mock_set_magics() def _mock_set_magics(self): these_magics = _magics if self._mock_methods is not None: these_magics = _magics.intersection(self._mock_methods) remove_magics = set() remove_magics = _magics - these_magics for entry in remove_magics: if entry in type(self).__dict__: # remove unneeded magic methods delattr(self, entry) # don't overwrite existing attributes if called a second time these_magics = these_magics - set(type(self).__dict__) _type = type(self) for entry in these_magics: setattr(_type, entry, MagicProxy(entry, self)) class NonCallableMagicMock(MagicMixin, NonCallableMock): """A version of `MagicMock` that isn't callable.""" def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) self._mock_set_magics() class MagicMock(MagicMixin, Mock): """ MagicMock is a subclass of Mock with default implementations of most of the magic methods. You can use MagicMock without having to configure the magic methods yourself. If you use the `spec` or `spec_set` arguments then *only* magic methods that exist in the spec will be created. Attributes and the return value of a `MagicMock` will also be `MagicMocks`. """ def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. 
If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) self._mock_set_magics() class MagicProxy(object): def __init__(self, name, parent): self.name = name self.parent = parent def __call__(self, *args, **kwargs): m = self.create_mock() return m(*args, **kwargs) def create_mock(self): entry = self.name parent = self.parent m = parent._get_child_mock(name=entry, _new_name=entry, _new_parent=parent) setattr(parent, entry, m) _set_return_value(parent, m, entry) return m def __get__(self, obj, _type=None): return self.create_mock() class _ANY(object): "A helper object that compares equal to everything." def __eq__(self, other): return True def __ne__(self, other): return False def __repr__(self): return '' ANY = _ANY() def _format_call_signature(name, args, kwargs): message = '%s(%%s)' % name formatted_args = '' args_string = ', '.join([repr(arg) for arg in args]) kwargs_string = ', '.join([ '%s=%r' % (key, value) for key, value in kwargs.items() ]) if args_string: formatted_args = args_string if kwargs_string: if formatted_args: formatted_args += ', ' formatted_args += kwargs_string return message % formatted_args class _Call(tuple): """ A tuple for holding the results of a call to a mock, either in the form `(args, kwargs)` or `(name, args, kwargs)`. If args or kwargs are empty then a call tuple will compare equal to a tuple without those values. This makes comparisons less verbose:: _Call(('name', (), {})) == ('name',) _Call(('name', (1,), {})) == ('name', (1,)) _Call(((), {'a': 'b'})) == ({'a': 'b'},) The `_Call` object provides a useful shortcut for comparing with call:: _Call(((1, 2), {'a': 3})) == call(1, 2, a=3) _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3) If the _Call has no name then it will match any name. """ def __new__(cls, value=(), name=None, parent=None, two=False, from_kall=True): name = '' args = () kwargs = {} _len = len(value) if _len == 3: name, args, kwargs = value elif _len == 2: first, second = value if isinstance(first, basestring): name = first if isinstance(second, tuple): args = second else: kwargs = second else: args, kwargs = first, second elif _len == 1: value, = value if isinstance(value, basestring): name = value elif isinstance(value, tuple): args = value else: kwargs = value if two: return tuple.__new__(cls, (args, kwargs)) return tuple.__new__(cls, (name, args, kwargs)) def __init__(self, value=(), name=None, parent=None, two=False, from_kall=True): self.name = name self.parent = parent self.from_kall = from_kall def __eq__(self, other): if other is ANY: return True try: len_other = len(other) except TypeError: return False self_name = '' if len(self) == 2: self_args, self_kwargs = self else: self_name, self_args, self_kwargs = self other_name = '' if len_other == 0: other_args, other_kwargs = (), {} elif len_other == 3: other_name, other_args, other_kwargs = other elif len_other == 1: value, = other if isinstance(value, tuple): other_args = value other_kwargs = {} elif isinstance(value, basestring): other_name = value other_args, other_kwargs = (), {} else: other_args = () other_kwargs = value else: # len 2 # could be (name, args) or (name, kwargs) or (args, kwargs) first, second = other if isinstance(first, basestring): other_name = first if isinstance(second, tuple): other_args, other_kwargs = second, {} else: other_args, other_kwargs = (), second else: other_args, other_kwargs = first, second if self_name and other_name != self_name: return False # this order is important for ANY to work! 
return (other_args, other_kwargs) == (self_args, self_kwargs) def __ne__(self, other): return not self.__eq__(other) def __call__(self, *args, **kwargs): if self.name is None: return _Call(('', args, kwargs), name='()') name = self.name + '()' return _Call((self.name, args, kwargs), name=name, parent=self) def __getattr__(self, attr): if self.name is None: return _Call(name=attr, from_kall=False) name = '%s.%s' % (self.name, attr) return _Call(name=name, parent=self, from_kall=False) def __repr__(self): if not self.from_kall: name = self.name or 'call' if name.startswith('()'): name = 'call%s' % name return name if len(self) == 2: name = 'call' args, kwargs = self else: name, args, kwargs = self if not name: name = 'call' elif not name.startswith('()'): name = 'call.%s' % name else: name = 'call%s' % name return _format_call_signature(name, args, kwargs) def call_list(self): """For a call object that represents multiple calls, `call_list` returns a list of all the intermediate calls as well as the final call.""" vals = [] thing = self while thing is not None: if thing.from_kall: vals.append(thing) thing = thing.parent return _CallList(reversed(vals)) call = _Call(from_kall=False) def create_autospec(spec, spec_set=False, instance=False, _parent=None, _name=None, **kwargs): """Create a mock object using another object as a spec. Attributes on the mock will use the corresponding attribute on the `spec` object as their spec. Functions or methods being mocked will have their arguments checked to check that they are called with the correct signature. If `spec_set` is True then attempting to set attributes that don't exist on the spec object will raise an `AttributeError`. If a class is used as a spec then the return value of the mock (the instance of the class) will have the same spec. You can use a class as the spec for an instance object by passing `instance=True`. The returned mock will only be callable if instances of the mock are callable. 
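    A small illustrative sketch (`some_function` stands in for whatever real
    callable you spec against)::

        mock_fn = create_autospec(some_function)
        mock_fn(1, 2)          # allowed if some_function accepts two arguments
        mock_fn.assert_called_with(1, 2)
        mock_fn()              # TypeError if some_function requires arguments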
`create_autospec` also takes arbitrary keyword arguments that are passed to the constructor of the created mock.""" if _is_list(spec): # can't pass a list instance to the mock constructor as it will be # interpreted as a list of strings spec = type(spec) is_type = isinstance(spec, ClassTypes) _kwargs = {'spec': spec} if spec_set: _kwargs = {'spec_set': spec} elif spec is None: # None we mock with a normal mock without a spec _kwargs = {} _kwargs.update(kwargs) Klass = MagicMock if type(spec) in DescriptorTypes: # descriptors don't have a spec # because we don't know what type they return _kwargs = {} elif not _callable(spec): Klass = NonCallableMagicMock elif is_type and instance and not _instance_callable(spec): Klass = NonCallableMagicMock _new_name = _name if _parent is None: # for a top level object no _new_name should be set _new_name = '' mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, name=_name, **_kwargs) if isinstance(spec, FunctionTypes): # should only happen at the top level because we don't # recurse for functions mock = _set_signature(mock, spec) else: _check_signature(spec, mock, is_type, instance) if _parent is not None and not instance: _parent._mock_children[_name] = mock if is_type and not instance and 'return_value' not in kwargs: mock.return_value = create_autospec(spec, spec_set, instance=True, _name='()', _parent=mock) for entry in dir(spec): if _is_magic(entry): # MagicMock already does the useful magic methods for us continue if isinstance(spec, FunctionTypes) and entry in FunctionAttributes: # allow a mock to actually be a function continue # XXXX do we need a better way of getting attributes without # triggering code execution (?) Probably not - we need the actual # object to mock it so we would rather trigger a property than mock # the property descriptor. Likewise we want to mock out dynamically # provided attributes. # XXXX what about attributes that raise exceptions other than # AttributeError on being fetched? # we could be resilient against it, or catch and propagate the # exception when the attribute is fetched from the mock try: original = getattr(spec, entry) except AttributeError: continue kwargs = {'spec': original} if spec_set: kwargs = {'spec_set': original} if not isinstance(original, FunctionTypes): new = _SpecState(original, spec_set, mock, entry, instance) mock._mock_children[entry] = new else: parent = mock if isinstance(spec, FunctionTypes): parent = mock.mock new = MagicMock(parent=parent, name=entry, _new_name=entry, _new_parent=parent, **kwargs) mock._mock_children[entry] = new skipfirst = _must_skip(spec, entry, is_type) _check_signature(original, new, skipfirst=skipfirst) # so functions created with _set_signature become instance attributes, # *plus* their underlying mock exists in _mock_children of the parent # mock. Adding to _mock_children may be unnecessary where we are also # setting as an instance attribute? 
if isinstance(new, FunctionTypes): setattr(mock, entry, new) return mock def _must_skip(spec, entry, is_type): if not isinstance(spec, ClassTypes): if entry in getattr(spec, '__dict__', {}): # instance attribute - shouldn't skip return False spec = spec.__class__ if not hasattr(spec, '__mro__'): # old style class: can't have descriptors anyway return is_type for klass in spec.__mro__: result = klass.__dict__.get(entry, DEFAULT) if result is DEFAULT: continue if isinstance(result, (staticmethod, classmethod)): return False return is_type # shouldn't get here unless function is a dynamically provided attribute # XXXX untested behaviour return is_type def _get_class(obj): try: return obj.__class__ except AttributeError: # in Python 2, _sre.SRE_Pattern objects have no __class__ return type(obj) class _SpecState(object): def __init__(self, spec, spec_set=False, parent=None, name=None, ids=None, instance=False): self.spec = spec self.ids = ids self.spec_set = spec_set self.parent = parent self.instance = instance self.name = name FunctionTypes = ( # python function type(create_autospec), # instance method type(ANY.__eq__), # unbound method type(_ANY.__eq__), ) FunctionAttributes = set([ 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name', ]) file_spec = None def mock_open(mock=None, read_data=''): """ A helper function to create a mock to replace the use of `open`. It works for `open` called directly or used as a context manager. The `mock` argument is the mock object to configure. If `None` (the default) then a `MagicMock` will be created for you, with the API limited to methods or attributes available on standard file handles. `read_data` is a string for the `read` method of the file handle to return. This is an empty string by default. """ global file_spec if file_spec is None: # set on first use if inPy3k: import _io file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) else: file_spec = file if mock is None: mock = MagicMock(name='open', spec=open) handle = MagicMock(spec=file_spec) handle.write.return_value = None handle.__enter__.return_value = handle handle.read.return_value = read_data mock.return_value = handle return mock class PropertyMock(Mock): """ A mock intended to be used as a property, or other descriptor, on a class. `PropertyMock` provides `__get__` and `__set__` methods so you can specify a return value when it is fetched. Fetching a `PropertyMock` instance from an object calls the mock, with no args. Setting it calls the mock with the value being set. """ def _get_child_mock(self, **kwargs): return MagicMock(**kwargs) def __get__(self, obj, obj_type): return self() def __set__(self, obj, val): self(val) eventlet-0.30.2/tests/mysqldb_test.py0000644000076500000240000001612214006212666020307 0ustar temotostaff00000000000000from __future__ import print_function import os import time import traceback import eventlet from eventlet import event try: from eventlet.green import MySQLdb except ImportError: MySQLdb = False import tests from tests import skip_unless, using_pyevent, get_database_auth def mysql_requirement(_f): """We want to skip tests if using pyevent, MySQLdb is not installed, or if there is no database running on the localhost that the auth file grants us access to. 
This errs on the side of skipping tests if everything is not right, but it's better than a million tests failing when you don't care about mysql support.""" if using_pyevent(_f): return False if MySQLdb is False: print("Skipping mysql tests, MySQLdb not importable") return False try: auth = get_database_auth()['MySQLdb'].copy() MySQLdb.connect(**auth) return True except MySQLdb.OperationalError: print("Skipping mysql tests, error when connecting:") traceback.print_exc() return False class TestMySQLdb(tests.LimitedTestCase): def setUp(self): self._auth = get_database_auth()['MySQLdb'] self.create_db() self.connection = None self.connection = MySQLdb.connect(**self._auth) cursor = self.connection.cursor() cursor.execute("""CREATE TABLE gargleblatz ( a INTEGER );""") self.connection.commit() cursor.close() super(TestMySQLdb, self).setUp() def tearDown(self): if self.connection: self.connection.close() self.drop_db() super(TestMySQLdb, self).tearDown() @skip_unless(mysql_requirement) def create_db(self): auth = self._auth.copy() try: self.drop_db() except Exception: pass dbname = 'test_%d_%d' % (os.getpid(), int(time.time() * 1000)) db = MySQLdb.connect(**auth).cursor() db.execute("create database " + dbname) db.close() self._auth['db'] = dbname del db def drop_db(self): db = MySQLdb.connect(**self._auth).cursor() db.execute("drop database " + self._auth['db']) db.close() del db def set_up_dummy_table(self, connection=None): close_connection = False if connection is None: close_connection = True if self.connection is None: connection = MySQLdb.connect(**self._auth) else: connection = self.connection cursor = connection.cursor() cursor.execute(self.dummy_table_sql) connection.commit() cursor.close() if close_connection: connection.close() dummy_table_sql = """CREATE TEMPORARY TABLE test_table ( row_id INTEGER PRIMARY KEY AUTO_INCREMENT, value_int INTEGER, value_float FLOAT, value_string VARCHAR(200), value_uuid CHAR(36), value_binary BLOB, value_binary_string VARCHAR(200) BINARY, value_enum ENUM('Y','N'), created TIMESTAMP ) ENGINE=InnoDB;""" def assert_cursor_yields(self, curs): counter = [0] def tick(): while True: counter[0] += 1 eventlet.sleep() gt = eventlet.spawn(tick) curs.execute("select 1") rows = curs.fetchall() self.assertEqual(len(rows), 1) self.assertEqual(len(rows[0]), 1) self.assertEqual(rows[0][0], 1) assert counter[0] > 0, counter[0] gt.kill() def assert_cursor_works(self, cursor): cursor.execute("select 1") rows = cursor.fetchall() self.assertEqual(len(rows), 1) self.assertEqual(len(rows[0]), 1) self.assertEqual(rows[0][0], 1) self.assert_cursor_yields(cursor) def assert_connection_works(self, conn): curs = conn.cursor() self.assert_cursor_works(curs) def test_module_attributes(self): import MySQLdb as orig for key in dir(orig): if key not in ('__author__', '__path__', '__revision__', '__version__', '__loader__'): assert hasattr(MySQLdb, key), "%s %s" % (key, getattr(orig, key)) def test_connecting(self): assert self.connection is not None def test_connecting_annoyingly(self): self.assert_connection_works(MySQLdb.Connect(**self._auth)) self.assert_connection_works(MySQLdb.Connection(**self._auth)) self.assert_connection_works(MySQLdb.connections.Connection(**self._auth)) def test_create_cursor(self): cursor = self.connection.cursor() cursor.close() def test_run_query(self): cursor = self.connection.cursor() self.assert_cursor_works(cursor) cursor.close() def test_run_bad_query(self): cursor = self.connection.cursor() try: cursor.execute("garbage blah blah") assert False except 
AssertionError: raise except Exception: pass cursor.close() def fill_up_table(self, conn): curs = conn.cursor() for i in range(1000): curs.execute('insert into test_table (value_int) values (%s)' % i) conn.commit() def test_yields(self): conn = self.connection self.set_up_dummy_table(conn) self.fill_up_table(conn) curs = conn.cursor() results = [] SHORT_QUERY = "select * from test_table" evt = event.Event() def a_query(): self.assert_cursor_works(curs) curs.execute(SHORT_QUERY) results.append(2) evt.send() eventlet.spawn(a_query) results.append(1) self.assertEqual([1], results) evt.wait() self.assertEqual([1, 2], results) def test_visibility_from_other_connections(self): conn = MySQLdb.connect(**self._auth) conn2 = MySQLdb.connect(**self._auth) curs = conn.cursor() try: curs2 = conn2.cursor() curs2.execute("insert into gargleblatz (a) values (%s)" % (314159)) self.assertEqual(curs2.rowcount, 1) conn2.commit() selection_query = "select * from gargleblatz" curs2.execute(selection_query) self.assertEqual(curs2.rowcount, 1) del curs2, conn2 # create a new connection, it should see the addition conn3 = MySQLdb.connect(**self._auth) curs3 = conn3.cursor() curs3.execute(selection_query) self.assertEqual(curs3.rowcount, 1) # now, does the already-open connection see it? curs.execute(selection_query) self.assertEqual(curs.rowcount, 1) del curs3, conn3 finally: # clean up my litter curs.execute("delete from gargleblatz where a=314159") conn.commit() class TestMonkeyPatch(tests.LimitedTestCase): @skip_unless(mysql_requirement) def test_monkey_patching(self): tests.run_isolated('mysqldb_monkey_patch.py') eventlet-0.30.2/tests/nosewrapper.py0000644000076500000240000000104414006212666020137 0ustar temotostaff00000000000000""" This script simply gets the paths correct for testing eventlet with the hub extension for Nose.""" import nose from os.path import dirname, realpath, abspath import sys parent_dir = dirname(dirname(realpath(abspath(__file__)))) if parent_dir not in sys.path: sys.path.insert(0, parent_dir) # hudson does a better job printing the test results if the exit value is 0 zero_status = '--force-zero-status' if zero_status in sys.argv: sys.argv.remove(zero_status) launch = nose.run else: launch = nose.main launch(argv=sys.argv) eventlet-0.30.2/tests/openssl_test.py0000644000076500000240000000066214006212666020321 0ustar temotostaff00000000000000import tests def test_import(): # https://github.com/eventlet/eventlet/issues/238 # Ensure that it's possible to import eventlet.green.OpenSSL. # Most basic test to check Python 3 compatibility. 
try: import OpenSSL except ImportError: raise tests.SkipTest('need pyopenssl') import eventlet.green.OpenSSL.SSL import eventlet.green.OpenSSL.crypto import eventlet.green.OpenSSL.version eventlet-0.30.2/tests/os_test.py0000644000076500000240000000036514006212666017257 0ustar temotostaff00000000000000import eventlet import six if six.PY3: def test_pathlib_open_issue_534(): pathlib = eventlet.import_patched('pathlib') path = pathlib.Path(__file__) with path.open(): # should not raise pass eventlet-0.30.2/tests/parse_results.py0000644000076500000240000000715714006212666020500 0ustar temotostaff00000000000000import sys import os import traceback try: import sqlite3 except ImportError: import pysqlite2.dbapi2 as sqlite3 import re import glob def parse_stdout(s): argv = re.search('^===ARGV=(.*?)$', s, re.M).group(1) argv = argv.split() testname = argv[-1] del argv[-1] hub = None reactor = None while argv: if argv[0] == '--hub': hub = argv[1] del argv[0] del argv[0] elif argv[0] == '--reactor': reactor = argv[1] del argv[0] del argv[0] else: del argv[0] if reactor is not None: hub += '/%s' % reactor return testname, hub unittest_delim = '----------------------------------------------------------------------' def parse_unittest_output(s): s = s[s.rindex(unittest_delim) + len(unittest_delim):] num = int(re.search('^Ran (\d+) test.*?$', s, re.M).group(1)) ok = re.search('^OK$', s, re.M) error, fail, timeout = 0, 0, 0 failed_match = re.search( r'^FAILED \((?:failures=(?P\d+))?,? ?(?:errors=(?P\d+))?\)$', s, re.M) ok_match = re.search('^OK$', s, re.M) if failed_match: assert not ok_match, (ok_match, s) fail = failed_match.group('f') error = failed_match.group('e') fail = int(fail or '0') error = int(error or '0') else: assert ok_match, repr(s) timeout_match = re.search('^===disabled because of timeout: (\d+)$', s, re.M) if timeout_match: timeout = int(timeout_match.group(1)) return num, error, fail, timeout def main(db): c = sqlite3.connect(db) c.execute('''create table if not exists parsed_command_record (id integer not null unique, testname text, hub text, runs integer, errors integer, fails integer, timeouts integer, error_names text, fail_names text, timeout_names text)''') c.commit() parse_error = 0 SQL = ('select command_record.id, command, stdout, exitcode from command_record ' 'where not exists (select * from parsed_command_record where ' 'parsed_command_record.id=command_record.id)') for row in c.execute(SQL).fetchall(): id, command, stdout, exitcode = row try: testname, hub = parse_stdout(stdout) if unittest_delim in stdout: runs, errors, fails, timeouts = parse_unittest_output(stdout) else: if exitcode == 0: runs, errors, fails, timeouts = 1, 0, 0, 0 if exitcode == 7: runs, errors, fails, timeouts = 0, 0, 0, 1 elif exitcode: runs, errors, fails, timeouts = 1, 1, 0, 0 except Exception: parse_error += 1 sys.stderr.write('Failed to parse id=%s\n' % id) print(repr(stdout)) traceback.print_exc() else: print(id, hub, testname, runs, errors, fails, timeouts) c.execute('insert into parsed_command_record ' '(id, testname, hub, runs, errors, fails, timeouts) ' 'values (?, ?, ?, ?, ?, ?, ?)', (id, testname, hub, runs, errors, fails, timeouts)) c.commit() if __name__ == '__main__': if not sys.argv[1:]: latest_db = sorted(glob.glob('results.*.db'), key=lambda f: os.stat(f).st_mtime)[-1] print(latest_db) sys.argv.append(latest_db) for db in sys.argv[1:]: main(db) execfile('generate_report.py') eventlet-0.30.2/tests/patcher/0000755000076500000240000000000014017673044016652 5ustar 
temotostaff00000000000000eventlet-0.30.2/tests/patcher/__init__.py0000644000076500000240000000000014006212666020746 0ustar temotostaff00000000000000eventlet-0.30.2/tests/patcher/shared1.py0000644000076500000240000000055614006212666020556 0ustar temotostaff00000000000000import os __test__ = False shared = None if os.environ.get('eventlet_test_in_progress') == 'yes': # pyopenssl imported urllib before we could patch it # we can ensure this shared module was not imported # https://github.com/eventlet/eventlet/issues/362 import tests.patcher.shared_import_socket as shared _ = shared # mask unused import error eventlet-0.30.2/tests/patcher/shared_import_socket.py0000644000076500000240000000025214006212666023430 0ustar temotostaff00000000000000import os import socket __test__ = False _ = socket # mask unused import error # prevent accidental imports assert os.environ.get('eventlet_test_in_progress') == 'yes' eventlet-0.30.2/tests/patcher_psycopg_test.py0000644000076500000240000000342314006212666022026 0ustar temotostaff00000000000000import os import six from tests import patcher_test, skip_unless from tests import get_database_auth from tests.db_pool_test import postgres_requirement psycopg_test_file = """ import os import sys import eventlet eventlet.monkey_patch() from eventlet import patcher if not patcher.is_monkey_patched('psycopg'): print("Psycopg not monkeypatched") sys.exit(0) count = [0] def tick(totalseconds, persecond): for i in range(totalseconds*persecond): count[0] += 1 eventlet.sleep(1.0/persecond) dsn = os.environ['PSYCOPG_TEST_DSN'] import psycopg2 def fetch(num, secs): conn = psycopg2.connect(dsn) cur = conn.cursor() for i in range(num): cur.execute("select pg_sleep(%s)", (secs,)) f = eventlet.spawn(fetch, 2, 1) t = eventlet.spawn(tick, 2, 100) f.wait() assert count[0] > 100, count[0] print("done") """ class PatchingPsycopg(patcher_test.ProcessBase): @skip_unless(postgres_requirement) def test_psycopg_patched(self): if 'PSYCOPG_TEST_DSN' not in os.environ: # construct a non-json dsn for the subprocess psycopg_auth = get_database_auth()['psycopg2'] if isinstance(psycopg_auth, str): dsn = psycopg_auth else: dsn = " ".join(["%s=%s" % (k, v) for k, v in six.iteritems(psycopg_auth)]) os.environ['PSYCOPG_TEST_DSN'] = dsn self.write_to_tempfile("psycopg_patcher", psycopg_test_file) output, lines = self.launch_subprocess('psycopg_patcher.py') if lines[0].startswith('Psycopg not monkeypatched'): print("Can't test psycopg2 patching; it's not installed.") return # if there's anything wrong with the test program it'll have a stack trace assert lines[0].startswith('done'), output eventlet-0.30.2/tests/patcher_test.py0000644000076500000240000004033714006212666020267 0ustar temotostaff00000000000000import os import shutil import sys import tempfile import six import tests base_module_contents = """ import socket import urllib print("base {0} {1}".format(socket, urllib)) """ patching_module_contents = """ from eventlet.green import socket from eventlet.green import urllib from eventlet import patcher print('patcher {0} {1}'.format(socket, urllib)) patcher.inject('base', globals(), ('socket', socket), ('urllib', urllib)) del patcher """ import_module_contents = """ import patching import socket print("importing {0} {1} {2} {3}".format(patching, socket, patching.socket, patching.urllib)) """ class ProcessBase(tests.LimitedTestCase): TEST_TIMEOUT = 3 # starting processes is time-consuming def setUp(self): super(ProcessBase, self).setUp() self._saved_syspath = sys.path self.tempdir = 
tempfile.mkdtemp('_patcher_test') def tearDown(self): super(ProcessBase, self).tearDown() sys.path = self._saved_syspath shutil.rmtree(self.tempdir) def write_to_tempfile(self, name, contents): filename = os.path.join(self.tempdir, name) if not filename.endswith('.py'): filename = filename + '.py' with open(filename, "w") as fd: fd.write(contents) def launch_subprocess(self, filename): path = os.path.join(self.tempdir, filename) output = tests.run_python(path) if six.PY3: output = output.decode('utf-8') separator = '\n' else: separator = b'\n' lines = output.split(separator) return output, lines def run_script(self, contents, modname=None): if modname is None: modname = "testmod" self.write_to_tempfile(modname, contents) return self.launch_subprocess(modname) class ImportPatched(ProcessBase): def test_patch_a_module(self): self.write_to_tempfile("base", base_module_contents) self.write_to_tempfile("patching", patching_module_contents) self.write_to_tempfile("importing", import_module_contents) output, lines = self.launch_subprocess('importing.py') assert lines[0].startswith('patcher'), repr(output) assert lines[1].startswith('base'), repr(output) assert lines[2].startswith('importing'), repr(output) assert 'eventlet.green.socket' in lines[1], repr(output) assert 'eventlet.green.urllib' in lines[1], repr(output) assert 'eventlet.green.socket' in lines[2], repr(output) assert 'eventlet.green.urllib' in lines[2], repr(output) assert 'eventlet.green.httplib' not in lines[2], repr(output) def test_import_patched_defaults(): tests.run_isolated('patcher_import_patched_defaults.py') def test_import_patched_handles_sub_modules(): tests.run_isolated('test_sub_module_in_import_patched/test.py') class MonkeyPatch(ProcessBase): def test_patched_modules(self): new_mod = """ from eventlet import patcher patcher.monkey_patch() import socket try: import urllib.request as urllib except ImportError: import urllib print("newmod {0} {1}".format(socket.socket, urllib.socket.socket)) """ self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') assert lines[0].startswith('newmod'), repr(output) self.assertEqual(lines[0].count('GreenSocket'), 2, repr(output)) def test_early_patching(self): new_mod = """ from eventlet import patcher patcher.monkey_patch() import eventlet eventlet.sleep(0.01) print("newmod") """ self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') self.assertEqual(len(lines), 2, repr(output)) assert lines[0].startswith('newmod'), repr(output) def test_late_patching(self): new_mod = """ import eventlet eventlet.sleep(0.01) from eventlet import patcher patcher.monkey_patch() eventlet.sleep(0.01) print("newmod") """ self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') self.assertEqual(len(lines), 2, repr(output)) assert lines[0].startswith('newmod'), repr(output) def test_typeerror(self): new_mod = """ from eventlet import patcher patcher.monkey_patch(finagle=True) """ self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') assert lines[-2].startswith('TypeError'), repr(output) assert 'finagle' in lines[-2], repr(output) def assert_boolean_logic(self, call, expected, not_expected=''): expected_list = ", ".join(['"%s"' % x for x in expected.split(',') if len(x)]) not_expected_list = ", ".join(['"%s"' % x for x in not_expected.split(',') if len(x)]) new_mod = """ from eventlet import patcher %s for mod in [%s]: assert patcher.is_monkey_patched(mod), 
mod for mod in [%s]: assert not patcher.is_monkey_patched(mod), mod print("already_patched {0}".format(",".join(sorted(patcher.already_patched.keys())))) """ % (call, expected_list, not_expected_list) self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') ap = 'already_patched' assert lines[0].startswith(ap), repr(output) patched_modules = lines[0][len(ap):].strip() # psycopg might or might not be patched based on installed modules patched_modules = patched_modules.replace("psycopg,", "") # ditto for MySQLdb patched_modules = patched_modules.replace("MySQLdb,", "") self.assertEqual( patched_modules, expected, "Logic:%s\nExpected: %s != %s" % (call, expected, patched_modules)) def test_boolean(self): self.assert_boolean_logic("patcher.monkey_patch()", 'os,select,socket,subprocess,thread,time') def test_boolean_all(self): self.assert_boolean_logic("patcher.monkey_patch(all=True)", 'os,select,socket,subprocess,thread,time') def test_boolean_all_single(self): self.assert_boolean_logic("patcher.monkey_patch(all=True, socket=True)", 'os,select,socket,subprocess,thread,time') def test_boolean_all_negative(self): self.assert_boolean_logic( "patcher.monkey_patch(all=False, socket=False, select=True)", 'select') def test_boolean_single(self): self.assert_boolean_logic("patcher.monkey_patch(socket=True)", 'socket') def test_boolean_double(self): self.assert_boolean_logic("patcher.monkey_patch(socket=True, select=True)", 'select,socket') def test_boolean_negative(self): self.assert_boolean_logic("patcher.monkey_patch(socket=False)", 'os,select,subprocess,thread,time') def test_boolean_negative2(self): self.assert_boolean_logic("patcher.monkey_patch(socket=False, time=False)", 'os,select,subprocess,thread') def test_conflicting_specifications(self): self.assert_boolean_logic("patcher.monkey_patch(socket=False, select=True)", 'select') test_monkey_patch_threading = """ def test_monkey_patch_threading(): tickcount = [0] def tick(): import six for i in six.moves.range(1000): tickcount[0] += 1 eventlet.sleep() def do_sleep(): tpool.execute(time.sleep, 0.5) eventlet.spawn(tick) w1 = eventlet.spawn(do_sleep) w1.wait() print(tickcount[0]) assert tickcount[0] > 900 tpool.killall() """ class Tpool(ProcessBase): TEST_TIMEOUT = 3 @tests.skip_with_pyevent def test_simple(self): new_mod = """ import eventlet from eventlet import patcher patcher.monkey_patch() from eventlet import tpool print("newmod {0}".format(tpool.execute(len, "hi"))) print("newmod {0}".format(tpool.execute(len, "hi2"))) tpool.killall() """ self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') self.assertEqual(len(lines), 3, output) assert lines[0].startswith('newmod'), repr(output) assert '2' in lines[0], repr(output) assert '3' in lines[1], repr(output) @tests.skip_with_pyevent def test_unpatched_thread(self): new_mod = """import eventlet eventlet.monkey_patch(time=False, thread=False) from eventlet import tpool import time """ new_mod += test_monkey_patch_threading new_mod += "\ntest_monkey_patch_threading()\n" self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') self.assertEqual(len(lines), 2, lines) @tests.skip_with_pyevent def test_patched_thread(self): new_mod = """import eventlet eventlet.monkey_patch(time=False, thread=True) from eventlet import tpool import time """ new_mod += test_monkey_patch_threading new_mod += "\ntest_monkey_patch_threading()\n" self.write_to_tempfile("newmod", new_mod) output, lines = 
self.launch_subprocess('newmod.py') self.assertEqual(len(lines), 2, "\n".join(lines)) def test_subprocess_after_monkey_patch(): code = '''\ import sys import eventlet eventlet.monkey_patch() from eventlet.green import subprocess subprocess.Popen([sys.executable, '-c', ''], stdin=subprocess.PIPE).wait() print('pass') ''' output = tests.run_python( path=None, args=['-c', code], ) assert output.rstrip() == b'pass' class Threading(ProcessBase): def test_orig_thread(self): new_mod = """import eventlet eventlet.monkey_patch() from eventlet import patcher import threading _threading = patcher.original('threading') def test(): print(repr(threading.currentThread())) t = _threading.Thread(target=test) t.start() t.join() print(len(threading._active)) print(len(_threading._active)) """ self.write_to_tempfile("newmod", new_mod) output, lines = self.launch_subprocess('newmod.py') self.assertEqual(len(lines), 4, "\n".join(lines)) assert lines[0].startswith('= 0.1) def test_timeout_non_blocking(self): sem = eventlet.Semaphore() self.assertRaises(ValueError, sem.acquire, blocking=False, timeout=1) def test_reinit(self): # py39+ expects locks to have a _at_fork_reinit() method; since we # patch in Semaphores in eventlet.green.thread, they need it, too sem = eventlet.Semaphore() sem.acquire() sem._at_fork_reinit() self.assertEqual(sem.acquire(blocking=False), True) self.assertEqual(sem.acquire(blocking=False), False) sem = eventlet.Semaphore(0) sem.release() sem._at_fork_reinit() self.assertEqual(sem.acquire(blocking=False), False) sem = eventlet.Semaphore(2) sem.acquire() sem._at_fork_reinit() self.assertEqual(sem.acquire(blocking=False), True) self.assertEqual(sem.acquire(blocking=False), True) self.assertEqual(sem.acquire(blocking=False), False) def test_semaphore_contention(): g_mutex = eventlet.Semaphore() counts = [0, 0] def worker(no): while min(counts) < 200: with g_mutex: counts[no - 1] += 1 eventlet.sleep(0.001) t1 = eventlet.spawn(worker, no=1) t2 = eventlet.spawn(worker, no=2) eventlet.sleep(0.5) t1.kill() t2.kill() assert abs(counts[0] - counts[1]) < int(min(counts) * 0.1), counts def test_semaphore_type_check(): eventlet.Semaphore(0) eventlet.Semaphore(1) eventlet.Semaphore(1e2) with tests.assert_raises(TypeError): eventlet.Semaphore('foo') with tests.assert_raises(ValueError): eventlet.Semaphore(-1) eventlet-0.30.2/tests/socket_test.py0000644000076500000240000000577614006212666020141 0ustar temotostaff00000000000000import array import os import shutil import sys import tempfile import eventlet from eventlet.green import socket from eventlet.support import greendns import tests def test_create_connection_error(): try: socket.create_connection(('192.0.2.1', 80), timeout=0.1) except (IOError, OSError): pass def test_recv_type(): # https://github.com/eventlet/eventlet/issues/245 # socket recv returning multiple data types # For this test to work, client and server have to be in separate # processes or OS threads. Just running two greenthreads gives # false test pass. 
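    # Illustrative sketch only (not part of the original test): the helper below
    # shows the pattern used just after this comment -- eventlet.patcher.original()
    # returns the unpatched stdlib module, so a genuine OS thread can be started
    # even in a monkey-patched process. 'target_func' is a hypothetical placeholder.
    def _sketch_run_in_real_os_thread(target_func):
        import eventlet
        real_threading = eventlet.patcher.original('threading')
        t = real_threading.Thread(target=target_func)
        t.start()  # a real OS thread, not a greenthread
        return t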
threading = eventlet.patcher.original('threading') addr = [] def server(): sock = eventlet.listen(('127.0.0.1', 0)) addr[:] = sock.getsockname() eventlet.sleep(0.2) server_thread = threading.Thread(target=server) server_thread.start() eventlet.sleep(0.1) sock = eventlet.connect(tuple(addr)) s = sock.recv(1) assert isinstance(s, bytes) def test_recv_into_type(): # make sure `_recv_loop` returns the correct value when `recv_meth` is of # foo_into type (fills a buffer and returns number of bytes, not the data) # Using threads like `test_recv_type` above. threading = eventlet.patcher.original('threading') addr = [] def server(): sock = eventlet.listen(('127.0.0.1', 0)) addr[:] = sock.getsockname() eventlet.sleep(0.2) server_thread = threading.Thread(target=server) server_thread.start() eventlet.sleep(0.1) sock = eventlet.connect(tuple(addr)) buf = array.array('B', b' ') res = sock.recv_into(buf, 1) assert isinstance(res, int) def test_dns_methods_are_green(): assert socket.gethostbyname is greendns.gethostbyname assert socket.gethostbyname_ex is greendns.gethostbyname_ex assert socket.getaddrinfo is greendns.getaddrinfo assert socket.getnameinfo is greendns.getnameinfo # https://github.com/eventlet/eventlet/pull/341 # mock older dnspython in system packages mock_sys_pkg_dir = tempfile.mkdtemp('eventlet_test_dns_methods_are_green') try: with open(mock_sys_pkg_dir + '/dns.py', 'wb') as f: f.write(b'raise Exception("Your IP address string is so illegal ' + b'it prevents installing packages.")\n') tests.run_isolated('socket_resolve_green.py', pythonpath_extend=[mock_sys_pkg_dir]) finally: shutil.rmtree(mock_sys_pkg_dir) def test_socket_api_family(): # It was named family_or_realsock # https://github.com/eventlet/eventlet/issues/319 socket.socket(family=socket.AF_INET) def test_getaddrinfo_ipv6_scope(): greendns.is_ipv6_addr('::1%2') if not socket.has_ipv6: return socket.getaddrinfo('::1%2', 80, socket.AF_INET6) def test_error_is_timeout(): s1, _ = socket.socketpair() s1.settimeout(0.01) try: s1.recv(1) except socket.error as e: tests.check_is_timeout(e) else: assert False, 'No timeout, socket.error was not raised' eventlet-0.30.2/tests/ssl_test.py0000644000076500000240000003310714006212666017437 0ustar temotostaff00000000000000import contextlib import random import socket import warnings import eventlet from eventlet import greenio from eventlet.green import socket try: from eventlet.green import ssl except ImportError: __test__ = False import six import tests def listen_ssl_socket(address=('localhost', 0), **kwargs): sock = ssl.wrap_socket( socket.socket(), tests.private_key_file, tests.certificate_file, server_side=True, **kwargs ) sock.bind(address) sock.listen(50) return sock class SSLTest(tests.LimitedTestCase): def setUp(self): # disabling socket.ssl warnings because we're testing it here warnings.filterwarnings( action='ignore', message='.*socket.ssl.*', category=DeprecationWarning) super(SSLTest, self).setUp() def test_duplex_response(self): def serve(listener): sock, addr = listener.accept() sock.recv(8192) sock.sendall(b'response') sock = listen_ssl_socket() server_coro = eventlet.spawn(serve, sock) client = ssl.wrap_socket(eventlet.connect(sock.getsockname())) client.sendall(b'line 1\r\nline 2\r\n\r\n') self.assertEqual(client.recv(8192), b'response') server_coro.wait() def test_ssl_context(self): def serve(listener): sock, addr = listener.accept() sock.recv(8192) sock.sendall(b'response') sock = listen_ssl_socket() server_coro = eventlet.spawn(serve, sock) context = 
ssl.create_default_context() context.verify_mode = ssl.CERT_REQUIRED context.check_hostname = True context.load_verify_locations(tests.certificate_file) client = context.wrap_socket( eventlet.connect(sock.getsockname()), server_hostname='Test') client.sendall(b'line 1\r\nline 2\r\n\r\n') self.assertEqual(client.recv(8192), b'response') server_coro.wait() def test_ssl_close(self): def serve(listener): sock, addr = listener.accept() sock.recv(8192) try: self.assertEqual(b'', sock.recv(8192)) except greenio.SSL.ZeroReturnError: pass sock = listen_ssl_socket() server_coro = eventlet.spawn(serve, sock) raw_client = eventlet.connect(sock.getsockname()) client = ssl.wrap_socket(raw_client) client.sendall(b'X') greenio.shutdown_safe(client) client.close() server_coro.wait() def test_ssl_connect(self): def serve(listener): sock, addr = listener.accept() sock.recv(8192) sock = listen_ssl_socket() server_coro = eventlet.spawn(serve, sock) raw_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ssl_client = ssl.wrap_socket(raw_client) ssl_client.connect(sock.getsockname()) ssl_client.sendall(b'abc') greenio.shutdown_safe(ssl_client) ssl_client.close() server_coro.wait() def test_recv_after_ssl_connect(self): def serve(listener): sock, addr = listener.accept() sock.sendall(b'hjk') sock = listen_ssl_socket() server_coro = eventlet.spawn(serve, sock) raw_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ssl_client = ssl.wrap_socket(raw_client) # Important: We need to call connect() on an SSL socket, not a plain one. # The bug was affecting that particular combination (create plain socket, # wrap, call connect() on the SSL socket and try to recv) on Python 3.5. ssl_client.connect(sock.getsockname()) # The call to recv used to fail with: # Traceback (most recent call last): # File "tests/ssl_test.py", line 99, in test_recv_after_ssl_connect # self.assertEqual(ssl_client.recv(3), b'hjk') # File "eventlet/green/ssl.py", line 194, in recv # return self._base_recv(buflen, flags, into=False) # File "eventlet/green/ssl.py", line 227, in _base_recv # read = self.read(nbytes) # File "eventlet/green/ssl.py", line 139, in read # super(GreenSSLSocket, self).read, *args, **kwargs) # File "eventlet/green/ssl.py", line 113, in _call_trampolining # return func(*a, **kw) # File "PYTHONLIB/python3.5/ssl.py", line 791, in read # return self._sslobj.read(len, buffer) # TypeError: read() argument 2 must be read-write bytes-like object, not None self.assertEqual(ssl_client.recv(3), b'hjk') greenio.shutdown_safe(ssl_client) ssl_client.close() server_coro.wait() def test_ssl_unwrap(self): def serve(): sock, addr = listener.accept() self.assertEqual(sock.recv(6), b'before') sock_ssl = ssl.wrap_socket(sock, tests.private_key_file, tests.certificate_file, server_side=True) sock_ssl.do_handshake() self.assertEqual(sock_ssl.recv(6), b'during') sock2 = sock_ssl.unwrap() self.assertEqual(sock2.recv(5), b'after') sock2.close() listener = eventlet.listen(('127.0.0.1', 0)) server_coro = eventlet.spawn(serve) client = eventlet.connect(listener.getsockname()) client.sendall(b'before') client_ssl = ssl.wrap_socket(client) client_ssl.do_handshake() client_ssl.sendall(b'during') client2 = client_ssl.unwrap() client2.sendall(b'after') server_coro.wait() def test_sendall_cpu_usage(self): """SSL socket.sendall() busy loop https://bitbucket.org/eventlet/eventlet/issue/134/greenssl-performance-issues Idea of this test is to check that GreenSSLSocket.sendall() does not busy loop retrying .send() calls, but instead trampolines until 
socket is writeable. BUFFER_SIZE and SENDALL_SIZE are magic numbers inferred through trial and error. """ # Time limit resistant to busy loops self.set_alarm(1) stage_1 = eventlet.event.Event() BUFFER_SIZE = 1000 SENDALL_SIZE = 100000 def serve(listener): conn, _ = listener.accept() conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, BUFFER_SIZE) self.assertEqual(conn.recv(8), b'request') conn.sendall(b'response') stage_1.wait() conn.sendall(b'x' * SENDALL_SIZE) server_sock = listen_ssl_socket() server_coro = eventlet.spawn(serve, server_sock) client_sock = eventlet.connect(server_sock.getsockname()) client_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFFER_SIZE) client = ssl.wrap_socket(client_sock) client.sendall(b'request') self.assertEqual(client.recv(8), b'response') stage_1.send() tests.check_idle_cpu_usage(0.2, 0.1) server_coro.kill() def test_greensslobject(self): def serve(listener): sock, addr = listener.accept() sock.sendall(b'content') greenio.shutdown_safe(sock) sock.close() listener = listen_ssl_socket() eventlet.spawn(serve, listener) client = ssl.wrap_socket(eventlet.connect(listener.getsockname())) self.assertEqual(client.recv(1024), b'content') self.assertEqual(client.recv(1024), b'') def test_regression_gh_17(self): # https://github.com/eventlet/eventlet/issues/17 # ssl wrapped but unconnected socket methods go special code path # test that path at least for syntax/typo errors sock = ssl.wrap_socket(socket.socket()) sock.settimeout(0.01) try: sock.sendall(b'') except ssl.SSLError as e: assert 'timed out' in str(e) def test_no_handshake_block_accept_loop(self): listener = listen_ssl_socket() listener.settimeout(0.3) def serve(sock): try: name = sock.recv(8) sock.sendall(b'hello ' + name) except Exception: # ignore evil clients pass finally: greenio.shutdown_safe(sock) sock.close() def accept_loop(): while True: try: sock, _ = listener.accept() except socket.error: return eventlet.spawn(serve, sock) loopt = eventlet.spawn(accept_loop) # evil no handshake evil = eventlet.connect(listener.getsockname()) good = ssl.wrap_socket(eventlet.connect(listener.getsockname())) good.sendall(b'good') response = good.recv(16) good.close() assert response == b'hello good' evil.close() listener.close() loopt.wait() eventlet.sleep(0) def test_receiving_doesnt_block_if_there_is_already_decrypted_buffered_data(self): # Here's what could (and would) happen before the relevant bug was fixed (assuming method # M was trampolining unconditionally before actually reading): # 1. One side sends n bytes, leaves connection open (important) # 2. The other side uses method M to read m (where m < n) bytes, the underlying SSL # implementation reads everything from the underlying socket, decrypts all n bytes, # returns m of them and buffers n-m to be read later. # 3. The other side tries to read the remainder of the data (n-m bytes), this blocks # because M trampolines uncoditionally and trampoline will hang because reading from # the underlying socket would block. It would block because there's no data to be read # and the connection is still open; leaving the connection open /mentioned in 1./ is # important because otherwise trampoline would return immediately and the test would pass # even with the bug still present in the code). # # The solution is to first request data from the underlying SSL implementation and only # trampoline if we actually need to read some data from the underlying socket. # # GreenSSLSocket.recv() wasn't broken but I've added code to test it as well for # completeness. 
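        # Rough sketch of the idea described above (illustrative only, not
        # eventlet's actual implementation): ask the SSL layer for buffered
        # plaintext first, and only wait on the file descriptor when nothing is
        # pending. 'sock' is assumed to be an already-wrapped, connected SSL socket.
        def _sketch_read(sock, nbytes):
            import select
            if sock.pending() == 0:
                # no decrypted bytes are buffered; blocking on the fd is now safe
                select.select([sock], [], [])
            return sock.recv(nbytes)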
content = b'xy' def recv(sock, expected): assert sock.recv(len(expected)) == expected def recv_into(sock, expected): buf = bytearray(len(expected)) assert sock.recv_into(buf, len(expected)) == len(expected) assert buf == expected for read_function in [recv, recv_into]: print('Trying %s...' % (read_function,)) listener = listen_ssl_socket() def accept(listener): sock, addr = listener.accept() sock.sendall(content) return sock accepter = eventlet.spawn(accept, listener) client_to_server = None try: client_to_server = ssl.wrap_socket(eventlet.connect(listener.getsockname())) for character in six.iterbytes(content): character = six.int2byte(character) print('We have %d already decrypted bytes pending, expecting: %s' % ( client_to_server.pending(), character)) read_function(client_to_server, character) finally: if client_to_server is not None: client_to_server.close() server_to_client = accepter.wait() # Very important: we only want to close the socket *after* the other side has # read the data it wanted already, otherwise this would defeat the purpose of the # test (see the comment at the top of this test). server_to_client.close() listener.close() def test_context_wrapped_accept(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.load_cert_chain(tests.certificate_file, tests.private_key_file) expected = "success:{}".format(random.random()).encode() def client(addr): client_tls = ssl.wrap_socket( eventlet.connect(addr), cert_reqs=ssl.CERT_REQUIRED, ca_certs=tests.certificate_file, ) client_tls.send(expected) server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_sock.bind(('localhost', 0)) server_sock.listen(1) eventlet.spawn(client, server_sock.getsockname()) server_tls = context.wrap_socket(server_sock, server_side=True) peer, _ = server_tls.accept() assert peer.recv(64) == expected peer.close() def test_explicit_keys_accept(self): expected = "success:{}".format(random.random()).encode() def client(addr): client_tls = ssl.wrap_socket( eventlet.connect(addr), cert_reqs=ssl.CERT_REQUIRED, ca_certs=tests.certificate_file, ) client_tls.send(expected) server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_sock.bind(('localhost', 0)) server_sock.listen(1) eventlet.spawn(client, server_sock.getsockname()) server_tls = ssl.wrap_socket( server_sock, server_side=True, keyfile=tests.private_key_file, certfile=tests.certificate_file, ) peer, _ = server_tls.accept() assert peer.recv(64) == expected peer.close() eventlet-0.30.2/tests/stdlib/0000755000076500000240000000000014017673044016505 5ustar temotostaff00000000000000eventlet-0.30.2/tests/stdlib/all.py0000644000076500000240000000340014006212666017621 0ustar temotostaff00000000000000""" Convenience module for running standard library tests with nose. The standard tests are not especially homogeneous, but they mostly expose a test_main method that does the work of selecting which tests to run based on what is supported by the platform. On its own, Nose would run all possible tests and many would fail; therefore we collect all of the test_main methods here in one module and Nose can run it. Hopefully in the future the standard tests get rewritten to be more nosey. Many of these tests make connections to external servers, and all.py tries to skip these tests rather than failing them, so you can get some work done on a plane. 
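The individual wrappers in this directory mostly follow the same shape: they use
eventlet.patcher.inject() to re-import a stdlib test module with green versions
of its dependencies substituted in. Roughly (illustrative sketch only;
'test.test_foo' is a hypothetical module name):

    from eventlet import patcher
    from eventlet.green import socket
    from eventlet.green import time

    patcher.inject('test.test_foo', globals(),
                   ('socket', socket), ('time', time))

    if __name__ == "__main__":
        test_main()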
""" from eventlet import debug debug.hub_prevent_multiple_readers(False) def restart_hub(): from eventlet import hubs hub = hubs.get_hub() hub_shortname = hub.__module__.split('.')[-1] # don't restart the pyevent hub; it's not necessary if hub_shortname != 'pyevent': hub.abort() hubs.use_hub(hub_shortname) def assimilate_patched(name): try: modobj = __import__(name, globals(), locals(), ['test_main']) restart_hub() except ImportError: print("Not importing %s, it doesn't exist in this installation/version of Python" % name) return else: method_name = name + "_test_main" try: test_method = modobj.test_main def test_main(): restart_hub() test_method() restart_hub() globals()[method_name] = test_main test_main.__name__ = name + '.test_main' except AttributeError: print("No test_main for %s, assuming it tests on import" % name) import all_modules for m in all_modules.get_modules(): assimilate_patched(m) eventlet-0.30.2/tests/stdlib/all_modules.py0000644000076500000240000000206114006212666021353 0ustar temotostaff00000000000000def get_modules(): test_modules = [ 'test_select', 'test_SimpleHTTPServer', 'test_asynchat', 'test_asyncore', 'test_ftplib', 'test_httplib', 'test_os', 'test_queue', 'test_socket_ssl', 'test_socketserver', # 'test_subprocess', 'test_thread', 'test_threading', 'test_threading_local', 'test_urllib', 'test_urllib2_localnet'] network_modules = [ 'test_httpservers', 'test_socket', 'test_ssl', 'test_timeout', 'test_urllib2'] # quick and dirty way of testing whether we can access # remote hosts; any tests that try internet connections # will fail if we cannot import socket s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.settimeout(0.5) s.connect(('eventlet.net', 80)) s.close() test_modules = test_modules + network_modules except socket.error as e: print("Skipping network tests") return test_modules eventlet-0.30.2/tests/stdlib/all_monkey.py0000644000076500000240000000134414006212666021210 0ustar temotostaff00000000000000import eventlet eventlet.sleep(0) from eventlet import patcher patcher.monkey_patch() def assimilate_real(name): print("Assimilating", name) try: modobj = __import__('test.' 
+ name, globals(), locals(), ['test_main']) except ImportError: print("Not importing %s, it doesn't exist in this installation/version of Python" % name) return else: method_name = name + "_test_main" try: globals()[method_name] = modobj.test_main modobj.test_main.__name__ = name + '.test_main' except AttributeError: print("No test_main for %s, assuming it tests on import" % name) import all_modules for m in all_modules.get_modules(): assimilate_real(m) eventlet-0.30.2/tests/stdlib/test_SimpleHTTPServer.py0000644000076500000240000000034314006212666023233 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import SimpleHTTPServer patcher.inject( 'test.test_SimpleHTTPServer', globals(), ('SimpleHTTPServer', SimpleHTTPServer)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_asynchat.py0000644000076500000240000000075614006212666021735 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import asyncore from eventlet.green import asynchat from eventlet.green import socket from eventlet.green import thread from eventlet.green import threading from eventlet.green import time patcher.inject( "test.test_asynchat", globals(), ('asyncore', asyncore), ('asynchat', asynchat), ('socket', socket), ('thread', thread), ('threading', threading), ('time', time)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_asyncore.py0000644000076500000240000000365214006212666021744 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import asyncore from eventlet.green import select from eventlet.green import socket from eventlet.green import threading from eventlet.green import time patcher.inject("test.test_asyncore", globals()) def new_closeall_check(self, usedefault): # Check that close_all() closes everything in a given map l = [] testmap = {} for i in range(10): c = dummychannel() l.append(c) self.assertEqual(c.socket.closed, False) testmap[i] = c if usedefault: # the only change we make is to not assign to asyncore.socket_map # because doing so fails to assign to the real asyncore's socket_map # and thus the test fails socketmap = asyncore.socket_map.copy() try: asyncore.socket_map.clear() asyncore.socket_map.update(testmap) asyncore.close_all() finally: testmap = asyncore.socket_map.copy() asyncore.socket_map.clear() asyncore.socket_map.update(socketmap) else: asyncore.close_all(testmap) self.assertEqual(len(testmap), 0) for c in l: self.assertEqual(c.socket.closed, True) HelperFunctionTests.closeall_check = new_closeall_check try: # Eventlet's select() emulation doesn't support the POLLPRI flag, # which this test relies on. Therefore, nuke it! 
BaseTestAPI.test_handle_expt = lambda *a, **kw: None except NameError: pass try: # temporarily disabling these tests in the python2.7/pyevent configuration from tests import using_pyevent import sys if using_pyevent(None) and sys.version_info >= (2, 7): TestAPI_UseSelect.test_handle_accept = lambda *a, **kw: None TestAPI_UseSelect.test_handle_close = lambda *a, **kw: None TestAPI_UseSelect.test_handle_read = lambda *a, **kw: None except NameError: pass if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_ftplib.py0000644000076500000240000000067714006212666021405 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import asyncore from eventlet.green import ftplib from eventlet.green import threading from eventlet.green import socket patcher.inject('test.test_ftplib', globals()) # this test only fails on python2.7/pyevent/--with-xunit; screw that try: TestTLS_FTPClass.test_data_connection = lambda *a, **kw: None except (AttributeError, NameError): pass if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_httplib.py0000644000076500000240000000037114006212666021562 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import httplib from eventlet.green import socket patcher.inject( 'test.test_httplib', globals(), ('httplib', httplib), ('socket', socket)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_httpservers.py0000644000076500000240000000107214006212666022504 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import BaseHTTPServer from eventlet.green import SimpleHTTPServer from eventlet.green import CGIHTTPServer from eventlet.green import urllib from eventlet.green import httplib from eventlet.green import threading patcher.inject( 'test.test_httpservers', globals(), ('BaseHTTPServer', BaseHTTPServer), ('SimpleHTTPServer', SimpleHTTPServer), ('CGIHTTPServer', CGIHTTPServer), ('urllib', urllib), ('httplib', httplib), ('threading', threading)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_os.py0000644000076500000240000000025314006212666020534 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import os patcher.inject( 'test.test_os', globals(), ('os', os)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_queue.py0000644000076500000240000000045614006212666021244 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import Queue from eventlet.green import threading from eventlet.green import time patcher.inject( 'test.test_queue', globals(), ('Queue', Queue), ('threading', threading), ('time', time)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_select.py0000644000076500000240000000027414006212666021375 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import select patcher.inject( 'test.test_select', globals(), ('select', select)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_socket.py0000644000076500000240000000100214006212666021374 0ustar temotostaff00000000000000#!/usr/bin/env python from eventlet import patcher from eventlet.green import socket from eventlet.green import select from eventlet.green import time from eventlet.green import thread from eventlet.green import threading patcher.inject( 'test.test_socket', globals(), ('socket', socket), ('select', select), ('time', time), ('thread', thread), ('threading', threading)) # TODO: fix 
TCPTimeoutTest.testInterruptedTimeout = lambda *a: None if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_socket_ssl.py0000644000076500000240000000157614006212666022275 0ustar temotostaff00000000000000#!/usr/bin/env python from eventlet import patcher from eventlet.green import socket # enable network resource import test.test_support i_r_e = test.test_support.is_resource_enabled def is_resource_enabled(resource): if resource == 'network': return True else: return i_r_e(resource) test.test_support.is_resource_enabled = is_resource_enabled try: socket.ssl socket.sslerror except AttributeError: raise ImportError("Socket module doesn't support ssl") patcher.inject('test.test_socket_ssl', globals()) test_basic = patcher.patch_function(test_basic) test_rude_shutdown = patcher.patch_function(test_rude_shutdown) def test_main(): if not hasattr(socket, "ssl"): raise test_support.TestSkipped("socket module has no ssl support") test_rude_shutdown() test_basic() test_timeout() if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_socketserver.py0000644000076500000240000000166414006212666022641 0ustar temotostaff00000000000000#!/usr/bin/env python from eventlet import patcher from eventlet.green import SocketServer from eventlet.green import socket from eventlet.green import select from eventlet.green import time from eventlet.green import threading # to get past the silly 'requires' check from test import test_support test_support.use_resources = ['network'] patcher.inject( 'test.test_socketserver', globals(), ('SocketServer', SocketServer), ('socket', socket), ('select', select), ('time', time), ('threading', threading)) # only a problem with pyevent from eventlet import tests if tests.using_pyevent(): try: SocketServerTest.test_ForkingUDPServer = lambda *a, **kw: None SocketServerTest.test_ForkingTCPServer = lambda *a, **kw: None SocketServerTest.test_ForkingUnixStreamServer = lambda *a, **kw: None except (NameError, AttributeError): pass if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_ssl.py0000644000076500000240000000335014006212666020715 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import asyncore from eventlet.green import BaseHTTPServer from eventlet.green import select from eventlet.green import socket from eventlet.green import SocketServer from eventlet.green import SimpleHTTPServer from eventlet.green import ssl from eventlet.green import threading from eventlet.green import urllib # stupid test_support messing with our mojo import test.test_support i_r_e = test.test_support.is_resource_enabled def is_resource_enabled(resource): if resource == 'network': return True else: return i_r_e(resource) test.test_support.is_resource_enabled = is_resource_enabled patcher.inject( 'test.test_ssl', globals(), ('asyncore', asyncore), ('BaseHTTPServer', BaseHTTPServer), ('select', select), ('socket', socket), ('SocketServer', SocketServer), ('ssl', ssl), ('threading', threading), ('urllib', urllib)) # TODO svn.python.org stopped serving up the cert that these tests expect; # presumably they've updated svn trunk but the tests in released versions will # probably break forever. This is why you don't write tests that connect to # external servers. 
NetworkedTests.testConnect = lambda s: None NetworkedTests.testFetchServerCert = lambda s: None NetworkedTests.test_algorithms = lambda s: None # these don't pass because nonblocking ssl sockets don't report # when the socket is closed uncleanly, per the docstring on # eventlet.green.GreenSSLSocket # *TODO: fix and restore these tests ThreadedTests.testProtocolSSL2 = lambda s: None ThreadedTests.testProtocolSSL3 = lambda s: None ThreadedTests.testProtocolTLS1 = lambda s: None ThreadedTests.testSocketServer = lambda s: None if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_subprocess.py0000644000076500000240000000037714006212666022312 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import subprocess from eventlet.green import time patcher.inject( 'test.test_subprocess', globals(), ('subprocess', subprocess), ('time', time)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_thread.py0000644000076500000240000000052514006212666021364 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import thread from eventlet.green import time patcher.inject('test.test_thread', globals()) try: # this is a new test in 2.7 that we don't support yet TestForkInThread.test_forkinthread = lambda *a, **kw: None except NameError: pass if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_thread__boundedsem.py0000644000076500000240000000107414006212666023730 0ustar temotostaff00000000000000"""Test that BoundedSemaphore with a very high bound is as good as unbounded one""" from eventlet import semaphore from eventlet.green import thread def allocate_lock(): return semaphore.Semaphore(1, 9999) original_allocate_lock = thread.allocate_lock thread.allocate_lock = allocate_lock original_LockType = thread.LockType thread.LockType = semaphore.CappedSemaphore try: import os.path execfile(os.path.join(os.path.dirname(__file__), 'test_thread.py')) finally: thread.allocate_lock = original_allocate_lock thread.LockType = original_LockType eventlet-0.30.2/tests/stdlib/test_threading.py0000644000076500000240000000320614006212666022061 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import threading from eventlet.green import thread from eventlet.green import time # *NOTE: doesn't test as much of the threading api as we'd like because many of # the tests are launched via subprocess and therefore don't get patched patcher.inject('test.test_threading', globals()) # "PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently) # exposed at the Python level. This test relies on ctypes to get at it." # Therefore it's also disabled when testing eventlet, as it's not emulated. 
try: ThreadTests.test_PyThreadState_SetAsyncExc = lambda s: None except (AttributeError, NameError): pass # disabling this test because it fails when run in Hudson even though it always # succeeds when run manually try: ThreadJoinOnShutdown.test_3_join_in_forked_from_thread = lambda *a, **kw: None except (AttributeError, NameError): pass # disabling this test because it relies on dorking with the hidden # innards of the threading module in a way that doesn't appear to work # when patched try: ThreadTests.test_limbo_cleanup = lambda *a, **kw: None except (AttributeError, NameError): pass # this test has nothing to do with Eventlet; if it fails it's not # because of patching (which it does, grump grump) try: ThreadTests.test_finalize_runnning_thread = lambda *a, **kw: None # it's misspelled in the stdlib, silencing this version as well because # inevitably someone will correct the error ThreadTests.test_finalize_running_thread = lambda *a, **kw: None except (AttributeError, NameError): pass if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_threading_local.py0000644000076500000240000000062714006212666023237 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import thread from eventlet.green import threading from eventlet.green import time # hub requires initialization before test can run from eventlet import hubs hubs.get_hub() patcher.inject( 'test.test_threading_local', globals(), ('time', time), ('thread', thread), ('threading', threading)) if __name__ == '__main__': test_main() eventlet-0.30.2/tests/stdlib/test_timeout.py0000644000076500000240000000054114006212666021601 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import socket from eventlet.green import time patcher.inject( 'test.test_timeout', globals(), ('socket', socket), ('time', time)) # to get past the silly 'requires' check from test import test_support test_support.use_resources = ['network'] if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_urllib.py0000644000076500000240000000037014006212666021404 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import httplib from eventlet.green import urllib patcher.inject( 'test.test_urllib', globals(), ('httplib', httplib), ('urllib', urllib)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_urllib2.py0000644000076500000240000000113614006212666021467 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import socket from eventlet.green import urllib2 patcher.inject( 'test.test_urllib2', globals(), ('socket', socket), ('urllib2', urllib2)) HandlerTests.test_file = patcher.patch_function(HandlerTests.test_file, ('socket', socket)) HandlerTests.test_cookie_redirect = patcher.patch_function( HandlerTests.test_cookie_redirect, ('urllib2', urllib2)) OpenerDirectorTests.test_badly_named_methods = patcher.patch_function( OpenerDirectorTests.test_badly_named_methods, ('urllib2', urllib2)) if __name__ == "__main__": test_main() eventlet-0.30.2/tests/stdlib/test_urllib2_localnet.py0000644000076500000240000000063014006212666023346 0ustar temotostaff00000000000000from eventlet import patcher from eventlet.green import BaseHTTPServer from eventlet.green import threading from eventlet.green import socket from eventlet.green import urllib2 patcher.inject( 'test.test_urllib2_localnet', globals(), ('BaseHTTPServer', BaseHTTPServer), ('threading', threading), ('socket', socket), ('urllib2', urllib2)) if __name__ == 
"__main__": test_main() eventlet-0.30.2/tests/subprocess_test.py0000644000076500000240000000636114006212666021030 0ustar temotostaff00000000000000import sys import time import eventlet from eventlet.green import subprocess import eventlet.patcher import tests original_subprocess = eventlet.patcher.original('subprocess') def test_subprocess_wait(): # https://bitbucket.org/eventlet/eventlet/issue/89 # In Python 3.3 subprocess.Popen.wait() method acquired `timeout` # argument. # RHEL backported it to their Python 2.6 package. cmd = [sys.executable, "-c", "import time; time.sleep(0.5)"] p = subprocess.Popen(cmd) ok = False t1 = time.time() try: p.wait(timeout=0.1) except subprocess.TimeoutExpired as e: str(e) # make sure it doesn't throw assert e.cmd == cmd assert e.timeout == 0.1 ok = True tdiff = time.time() - t1 assert ok, 'did not raise subprocess.TimeoutExpired' assert 0.1 <= tdiff <= 0.2, 'did not stop within allowed time' def test_communicate_with_poll(): # This test was being skipped since git 25812fca8, I don't there's # a need to do this. The original comment: # # https://github.com/eventlet/eventlet/pull/24 # `eventlet.green.subprocess.Popen.communicate()` was broken # in Python 2.7 because the usage of the `select` module was moved from # `_communicate` into two other methods `_communicate_with_select` # and `_communicate_with_poll`. Link to 2.7's implementation: # http://hg.python.org/cpython/file/2145593d108d/Lib/subprocess.py#l1255 p = subprocess.Popen( [sys.executable, '-c', 'import time; time.sleep(0.5)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) t1 = time.time() eventlet.with_timeout(0.1, p.communicate, timeout_value=True) tdiff = time.time() - t1 assert 0.1 <= tdiff <= 0.2, 'did not stop within allowed time' def test_close_popen_stdin_with_close_fds(): p = subprocess.Popen( ['ls'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=False, cwd=None, env=None) p.communicate(None) try: p.stdin.close() except Exception as e: assert False, "Exception should not be raised, got %r instead" % e def test_universal_lines(): p = subprocess.Popen( [sys.executable, '--version'], shell=False, stdout=subprocess.PIPE, universal_newlines=True) p.communicate(None) def test_patched_communicate_290(): # https://github.com/eventlet/eventlet/issues/290 # Certain order of import and monkey_patch breaks subprocess communicate() # with AttributeError module `select` has no `poll` on Linux # unpatched methods are removed for safety reasons in commit f63165c0e3 tests.run_isolated('subprocess_patched_communicate.py') def test_check_call_without_timeout_works(): # There was a regression that'd result in the following exception: # TypeError: check_call() missing 1 required keyword-only argument: 'timeout' subprocess.check_call( ['ls'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) def test_exception_identity(): # https://github.com/eventlet/eventlet/issues/413 # green module must keep exceptions classes as stdlib version tests.run_isolated('subprocess_exception_identity.py') eventlet-0.30.2/tests/test__event.py0000644000076500000240000000211214006212666020106 0ustar temotostaff00000000000000from eventlet import spawn, sleep, with_timeout from eventlet.event import Event import eventlet from tests import LimitedTestCase DELAY = 0.01 class TestEvent(LimitedTestCase): def test_send_exc(self): log = [] e = Event() def waiter(): try: result = e.wait() log.append(('received', result)) except Exception as ex: log.append(('catched', ex)) spawn(waiter) sleep(0) 
# let waiter to block on e.wait() obj = Exception() e.send(exc=obj) sleep(0) sleep(0) assert log == [('catched', obj)], log def test_send(self): event1 = Event() event2 = Event() spawn(event1.send, 'hello event1') eventlet.Timeout(0, ValueError('interrupted')) try: result = event1.wait() except ValueError: X = object() result = with_timeout(DELAY, event2.wait, timeout_value=X) assert result is X, 'Nobody sent anything to event2 yet it received %r' % (result, ) eventlet-0.30.2/tests/test__greenness.py0000644000076500000240000000305314006212666020763 0ustar temotostaff00000000000000"""Test than modules in eventlet.green package are indeed green. To do that spawn a green server and then access it using a green socket. If either operation blocked the whole script would block and timeout. """ import eventlet from eventlet.green import BaseHTTPServer import six if six.PY2: from eventlet.green.urllib2 import HTTPError, urlopen else: from eventlet.green.urllib.request import urlopen from eventlet.green.urllib.error import HTTPError class QuietHandler(BaseHTTPServer.BaseHTTPRequestHandler): protocol_version = "HTTP/1.0" def log_message(self, *args, **kw): pass def start_http_server(): server_address = ('localhost', 0) httpd = BaseHTTPServer.HTTPServer(server_address, QuietHandler) sa = httpd.socket.getsockname() # print("Serving HTTP on", sa[0], "port", sa[1], "...") httpd.request_count = 0 def serve(): # increment the request_count before handling the request because # the send() for the response blocks (or at least appeared to be) httpd.request_count += 1 httpd.handle_request() return eventlet.spawn(serve), httpd, sa[1] def test_urllib(): gthread, server, port = start_http_server() try: assert server.request_count == 0 try: urlopen('http://127.0.0.1:{0}'.format(port)) assert False, 'should not get there' except HTTPError as ex: assert ex.code == 501, repr(ex) assert server.request_count == 1 finally: server.server_close() eventlet.kill(gthread) eventlet-0.30.2/tests/test__refcount.py0000644000076500000240000000414114006212666020616 0ustar temotostaff00000000000000"""This test checks that socket instances (not GreenSockets but underlying sockets) are not leaked by the hub. 
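The leak check relies on a common weakref-plus-gc pattern, sketched here for
illustration only (the names are placeholders, not part of this module):

    import gc
    import weakref

    def assert_collected(make_obj):
        ref = weakref.ref(make_obj())   # keep only a weak reference
        gc.collect()                    # give the collector a chance to run
        assert ref() is None, 'object still alive - something holds a reference'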
""" import gc import pprint import sys import weakref import eventlet from eventlet.green import socket SOCKET_TIMEOUT = 0.1 def handle_request(s, raise_on_timeout): try: conn, address = s.accept() except socket.timeout: print('handle_request: server accept timeout') if raise_on_timeout: raise else: return print('handle_request: accepted') res = conn.recv(100) assert res == b'hello', repr(res) # print('handle_request: recvd %r' % res) res = conn.sendall(b'bye') # print('handle_request: sent %r' % res) # print('handle_request: conn refcount: %s' % sys.getrefcount(conn)) def make_request(addr): # print('make_request') s = eventlet.connect(addr) # print('make_request - connected') res = s.sendall(b'hello') # print('make_request - sent %s' % res) res = s.recv(100) assert res == b'bye', repr(res) # print('make_request - recvd %r' % res) def run_interaction(run_client): s = eventlet.listen(('127.0.0.1', 0)) s.settimeout(SOCKET_TIMEOUT) addr = s.getsockname() print('run_interaction: addr:', addr) eventlet.spawn(handle_request, s, run_client) if run_client: eventlet.spawn(make_request, addr) eventlet.sleep(0.1 + SOCKET_TIMEOUT) print('run_interaction: refcount(s.fd)', sys.getrefcount(s.fd)) return weakref.ref(s.fd) def run_and_check(run_client): w = run_interaction(run_client=run_client) # clear_sys_exc_info() gc.collect() fd = w() print('run_and_check: weakref fd:', fd) if fd: print(pprint.pformat(gc.get_referrers(fd))) for x in gc.get_referrers(fd): print(pprint.pformat(x)) for y in gc.get_referrers(x): print('- {0}'.format(pprint.pformat(y))) raise AssertionError('server should be dead by now') def test_clean_exit(): run_and_check(True) run_and_check(True) def test_timeout_exit(): run_and_check(False) run_and_check(False) eventlet-0.30.2/tests/test__socket_errors.py0000644000076500000240000000415414006212666021661 0ustar temotostaff00000000000000import errno import unittest import socket as _original_sock from eventlet.green import socket class TestSocketErrors(unittest.TestCase): def test_connection_refused(self): # open and close a dummy server to find an unused port server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('127.0.0.1', 0)) server.listen(1) port = server.getsockname()[1] server.close() del server s = socket.socket() try: s.connect(('127.0.0.1', port)) self.fail("Shouldn't have connected") except socket.error as ex: code, text = ex.args assert code == errno.ECONNREFUSED, 'Expected ECONNREFUSED, got {0} ({1})'.format(code, text) assert 'refused' in text.lower(), (code, text) def test_timeout_real_socket(self): """ Test underlying socket behavior to ensure correspondence between green sockets and the underlying socket module. """ return self.test_timeout(socket=_original_sock) def test_timeout(self, socket=socket): """ Test that the socket timeout exception works correctly. 
""" server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('127.0.0.1', 0)) server.listen(1) port = server.getsockname()[1] s = socket.socket() s.connect(('127.0.0.1', port)) cs, addr = server.accept() cs.settimeout(1) try: try: cs.recv(1024) self.fail("Should have timed out") except socket.timeout as ex: assert hasattr(ex, 'args') assert len(ex.args) == 1 assert ex.args[0] == 'timed out' finally: s.close() cs.close() server.close() def test_create_connection_refused(): try: socket.create_connection(('127.0.0.1', 1)) assert False, "Shouldn't have connected" except socket.error as ex: code, text = ex.args assert code == errno.ECONNREFUSED, 'Expected ECONNREFUSED, got {0} ({1})'.format(code, text) eventlet-0.30.2/tests/test_server.crt0000644000076500000240000000252314006212666020302 0ustar temotostaff00000000000000-----BEGIN CERTIFICATE----- MIIDwjCCAqqgAwIBAgIJAN19NW1oDKKtMA0GCSqGSIb3DQEBCwUAMHYxCzAJBgNV BAYTAlRTMQ0wCwYDVQQIDARUZXN0MQ0wCwYDVQQHDARUZXN0MRYwFAYDVQQKDA1U ZXN0IEV2ZW50bGV0MQ0wCwYDVQQLDARUZXN0MQ0wCwYDVQQDDARUZXN0MRMwEQYJ KoZIhvcNAQkBFgRUZXN0MB4XDTE4MDgyMjEzNDIxMVoXDTI4MDgxOTEzNDIxMVow djELMAkGA1UEBhMCVFMxDTALBgNVBAgMBFRlc3QxDTALBgNVBAcMBFRlc3QxFjAU BgNVBAoMDVRlc3QgRXZlbnRsZXQxDTALBgNVBAsMBFRlc3QxDTALBgNVBAMMBFRl c3QxEzARBgkqhkiG9w0BCQEWBFRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQC8Fyu/6oOS5J8RENcE9FP/zmaM59MxlCmc7t9wlL/3y4GciztfxUb4 EEmosUglHH/BSqFggZ6d8ncuUpHR5bPvEBTB8OYpjYQlF1S0a7D46tx+SwbHqlZY 8YIOah9Hdt0Jc4CmGBcZ9qeH/G9z1duVcsMuCGQ3WOuP3ObFb7UR9hNaD/xXZX8c Lvc6cJHMKaxHCeIBOL+z/9kJqhh30eqsmNB5AXSoV8b2B3MV3glW2vd5WJVYEWxl 3+GNgzZJ3KGape7pcBYER7zg/yZLZxgNFlTCOZiysjNxC0liJA9tgUQhRc1gsqA8 dQxzvqW8kuZedmatjyM58WixvjymobC3AgMBAAGjUzBRMB0GA1UdDgQWBBQT3V3f 8vCoqGXe6zySSjVP+J/P7zAfBgNVHSMEGDAWgBQT3V3f8vCoqGXe6zySSjVP+J/P 7zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAws7zXsftw5s/P dnyFAf8q0WoVtWi9ljshWfJvTFMfuCqdSoNT+kIeQq071/RFW9YLqDZGXI4fyfzW 50A7xFm+Syy7wPOmfLJdPx5HRJ5jgIDlij9vL45W3mXEohkkzMCdjwXfOIQPOEEx ZQHF57RaHlKEGexc/yvOLlOgKP23BOgB7pZjCC9divyDJ3ETlzgE+UTymHxmFM0i TCAM9dGEl1QPr7zA08rNgVae+/uQksdM55QmQFkTAXisFPcxNgHSKOSHsDiUJvWG 7bJrwO6+T2wjRxWRD7anQV3DqBG1WteXA/dfYqjUi0QPqreWqNb+3OM60UwPJsvl ZDfUrsbY -----END CERTIFICATE----- eventlet-0.30.2/tests/test_server.key0000644000076500000240000000325014006212666020300 0ustar temotostaff00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC8Fyu/6oOS5J8R ENcE9FP/zmaM59MxlCmc7t9wlL/3y4GciztfxUb4EEmosUglHH/BSqFggZ6d8ncu UpHR5bPvEBTB8OYpjYQlF1S0a7D46tx+SwbHqlZY8YIOah9Hdt0Jc4CmGBcZ9qeH /G9z1duVcsMuCGQ3WOuP3ObFb7UR9hNaD/xXZX8cLvc6cJHMKaxHCeIBOL+z/9kJ qhh30eqsmNB5AXSoV8b2B3MV3glW2vd5WJVYEWxl3+GNgzZJ3KGape7pcBYER7zg /yZLZxgNFlTCOZiysjNxC0liJA9tgUQhRc1gsqA8dQxzvqW8kuZedmatjyM58Wix vjymobC3AgMBAAECggEARdC79oKP0Ah4WpdLmJUwULVGL2slYzPBtnScD2fHUa0C MR8lLMlHLHCvRqR3SP8pFJAlEnNKcwo63sDbS3PdtBDeA5btd+MFd9cPI+ENUXer 3Fzw6U/6uQz732nYrmgaZ5Wt3Pj4KjT5vmoe3Ho3BWQRupZ2zGx/E2lFZ2etaDqx yId9dqsLhR2dTdCJ8xaf01JSIcZF5ctP3Hx/Xk3b4QLdAujSFfee1qtPyMU89cQG yjkqTdzVl3Z1isACPHsCHxyIs3FDU1B4YuPyPoMx8F6Ok81/r6gen0asu3H4VrIH oYnuiHftdGXzpCQO9aWnpxhV7Na9nkkBldsQSvrLwQKBgQDgByy9/K9yOgfQrepY jI7s5p5Ho9dsIOrNN8JXZONYY7S9FS3jBfG7p/tPO8hXaSl7VKJBomSQbO11s/72 +GAcXDYNG1a6nrX6yXQwGcvUT2F9lnlModzIrEljuH4Af9VA50+5cEwx8HL370vo 7rVyAgKaOlZBLoB+B3SYgmWdpwKBgQDW7wZ0vT/44vuevF3mMv2z+DEeLxJRXSVc j17c+XOcQIb7kQ8kyi5tqBDUL7ywMSk3zLwBFnB62z+c2z9JGafPCPLXWEE73JLg eFuHpdYbxssu1GdK+uzhzFeF9W7fy9LiP+a2ZmiD7iSOMQ+4AilWggsOS4/TPM/S KCpiWMdWcQKBgGfOC0ZRMn5sMzjXjTHQl11gEtFBMs1whzvl7VAPeGR5dsLycFu0 
YmlAkesJ2W/fU5BJDBH7jDt7raAPckG/ntElTQpPs6Uva0U0WXj8Sv0qdxL5SWVf zuzRIARr9T6H/SlgxQk45qVxM+R8ZiEdkser9Ma/5z22rWU4USdlrG4bAoGAdWmD tFgBz1tnqxIplWw7jYnCg0OhlRL2KvcjEkGu9yeE1X4rsKRF8p9B+jfNss5YPuQ3 u7xfW63TtUNL8gny0rWHuz25/RKEpdz520MrkGbrbbyYZKxopPy0vzgRBm5Y2EGi LfgJljCMhaSleparsFc/5OJAQlvRB3Prm2f2FPECgYEA2+TCLKOQslh/3PJD4mlY VhQTDYkiKMWW3LXYXHhTEBWtxkdhZ1AkrOs10Urw+YQ2tpmkR73Nw+PUnG1o4Wka vaDRaKqjmAnLU9ktXChZi3JME/cmgY0VomLkkPv2bGfmWDKI+BKmhdZCxsi0NgPi fcngyWF+LKLfvpuyAqOfwLg= -----END PRIVATE KEY----- eventlet-0.30.2/tests/thread_test.py0000644000076500000240000000510414006212666020101 0ustar temotostaff00000000000000import gc import weakref import eventlet from eventlet import corolocal from eventlet import event from eventlet import greenthread from eventlet.green import thread import six from tests import LimitedTestCase class Locals(LimitedTestCase): def passthru(self, *args, **kw): self.results.append((args, kw)) return args, kw def setUp(self): self.results = [] super(Locals, self).setUp() def tearDown(self): self.results = [] super(Locals, self).tearDown() def test_assignment(self): my_local = corolocal.local() my_local.a = 1 def do_something(): my_local.b = 2 self.assertEqual(my_local.b, 2) try: my_local.a self.fail() except AttributeError: pass eventlet.spawn(do_something).wait() self.assertEqual(my_local.a, 1) def test_calls_init(self): init_args = [] class Init(corolocal.local): def __init__(self, *args): init_args.append((args, eventlet.getcurrent())) my_local = Init(1, 2, 3) self.assertEqual(init_args[0][0], (1, 2, 3)) self.assertEqual(init_args[0][1], eventlet.getcurrent()) def do_something(): my_local.foo = 'bar' self.assertEqual(len(init_args), 2, init_args) self.assertEqual(init_args[1][0], (1, 2, 3)) self.assertEqual(init_args[1][1], eventlet.getcurrent()) eventlet.spawn(do_something).wait() def test_calling_methods(self): class Caller(corolocal.local): def callme(self): return self.foo my_local = Caller() my_local.foo = "foo1" self.assertEqual("foo1", my_local.callme()) def do_something(): my_local.foo = "foo2" self.assertEqual("foo2", my_local.callme()) eventlet.spawn(do_something).wait() my_local.foo = "foo3" self.assertEqual("foo3", my_local.callme()) def test_no_leaking(self): refs = weakref.WeakKeyDictionary() my_local = corolocal.local() class X(object): pass def do_something(i): o = X() refs[o] = True my_local.foo = o p = eventlet.GreenPool() for i in six.moves.range(100): p.spawn(do_something, i) p.waitall() del p gc.collect() eventlet.sleep(0) gc.collect() # at this point all our coros have terminated self.assertEqual(len(refs), 1) eventlet-0.30.2/tests/timeout_test.py0000644000076500000240000000324314006212666020322 0ustar temotostaff00000000000000import eventlet import tests DELAY = 0.01 class TestDirectRaise(tests.LimitedTestCase): def test_direct_raise_class(self): try: raise eventlet.Timeout except eventlet.Timeout as t: assert not t.pending, repr(t) def test_direct_raise_instance(self): tm = eventlet.Timeout() try: raise tm except eventlet.Timeout as t: assert tm is t, (tm, t) assert not t.pending, repr(t) def test_repr(self): # just verify these don't crash tm = eventlet.Timeout(1) eventlet.sleep(0) repr(tm) str(tm) tm.cancel() tm = eventlet.Timeout(None, RuntimeError) repr(tm) str(tm) tm = eventlet.Timeout(None, False) repr(tm) str(tm) class TestWithTimeout(tests.LimitedTestCase): def test_with_timeout(self): self.assertRaises(eventlet.Timeout, eventlet.with_timeout, DELAY, eventlet.sleep, DELAY * 10) X = object() r = eventlet.with_timeout(DELAY, 
eventlet.sleep, DELAY * 10, timeout_value=X) assert r is X, (r, X) r = eventlet.with_timeout(DELAY * 10, eventlet.sleep, DELAY, timeout_value=X) assert r is None, r def test_with_outer_timer(self): def longer_timeout(): # this should not catch the outer timeout's exception return eventlet.with_timeout(DELAY * 10, eventlet.sleep, DELAY * 20, timeout_value='b') self.assertRaises( eventlet.Timeout, eventlet.with_timeout, DELAY, longer_timeout) def test_is_timeout_attribute(): tests.check_is_timeout(eventlet.Timeout()) eventlet-0.30.2/tests/timeout_test_with_statement.py0000644000076500000240000001005014006212666023433 0ustar temotostaff00000000000000"""Tests with-statement behavior of Timeout class.""" import gc import sys import time import weakref from eventlet import sleep from eventlet.timeout import Timeout from tests import LimitedTestCase DELAY = 0.01 class Error(Exception): pass class Test(LimitedTestCase): def test_cancellation(self): # Nothing happens if with-block finishes before the timeout expires t = Timeout(DELAY * 2) sleep(0) # make it pending assert t.pending, repr(t) with t: assert t.pending, repr(t) sleep(DELAY) # check if timer was actually cancelled assert not t.pending, repr(t) sleep(DELAY * 2) def test_raising_self(self): # An exception will be raised if it's not try: with Timeout(DELAY) as t: sleep(DELAY * 2) except Timeout as ex: assert ex is t, (ex, t) else: raise AssertionError('must raise Timeout') def test_raising_self_true(self): # specifying True as the exception raises self as well try: with Timeout(DELAY, True) as t: sleep(DELAY * 2) except Timeout as ex: assert ex is t, (ex, t) else: raise AssertionError('must raise Timeout') def test_raising_custom_exception(self): # You can customize the exception raised: try: with Timeout(DELAY, IOError("Operation takes way too long")): sleep(DELAY * 2) except IOError as ex: assert str(ex) == "Operation takes way too long", repr(ex) def test_raising_exception_class(self): # Providing classes instead of values should be possible too: try: with Timeout(DELAY, ValueError): sleep(DELAY * 2) except ValueError: pass def test_raising_exc_tuple(self): try: 1 // 0 except: try: with Timeout(DELAY, sys.exc_info()[0]): sleep(DELAY * 2) raise AssertionError('should not get there') raise AssertionError('should not get there') except ZeroDivisionError: pass else: raise AssertionError('should not get there') def test_cancel_timer_inside_block(self): # It's possible to cancel the timer inside the block: with Timeout(DELAY) as timer: timer.cancel() sleep(DELAY * 2) def test_silent_block(self): # To silence the exception before exiting the block, pass # False as second parameter. 
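        # A minimal sketch of the silencing pattern, separate from this test
        # (``sock`` here is a hypothetical blocking object):
        #
        #     with Timeout(1, False):      # False => the Timeout is swallowed
        #         data = sock.recv(1024)   # may block for up to ~1 second
        #     # control always reaches this point, timed out or not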
XDELAY = 0.1 start = time.time() with Timeout(XDELAY, False): sleep(XDELAY * 2) delta = (time.time() - start) assert delta < XDELAY * 2, delta def test_dummy_timer(self): # passing None as seconds disables the timer with Timeout(None): sleep(DELAY) sleep(DELAY) def test_ref(self): err = Error() err_ref = weakref.ref(err) with Timeout(DELAY * 2, err): sleep(DELAY) del err gc.collect() assert not err_ref(), repr(err_ref()) def test_nested_timeout(self): with Timeout(DELAY, False): with Timeout(DELAY * 2, False): sleep(DELAY * 3) raise AssertionError('should not get there') with Timeout(DELAY) as t1: with Timeout(DELAY * 2) as t2: try: sleep(DELAY * 3) except Timeout as ex: assert ex is t1, (ex, t1) assert not t1.pending, t1 assert t2.pending, t2 assert not t2.pending, t2 with Timeout(DELAY * 2) as t1: with Timeout(DELAY) as t2: try: sleep(DELAY * 3) except Timeout as ex: assert ex is t2, (ex, t2) assert t1.pending, t1 assert not t2.pending, t2 assert not t1.pending, t1 eventlet-0.30.2/tests/timer_test.py0000644000076500000240000000235114006212666017753 0ustar temotostaff00000000000000from unittest import TestCase, main import eventlet from eventlet import hubs from eventlet.hubs import timer class TestTimer(TestCase): def test_copy(self): t = timer.Timer(0, lambda: None) t2 = t.copy() assert t.seconds == t2.seconds assert t.tpl == t2.tpl assert t.called == t2.called def test_schedule(self): hub = hubs.get_hub() # clean up the runloop, preventing side effects from previous tests # on this thread if hub.running: hub.abort() eventlet.sleep(0) called = [] # t = timer.Timer(0, lambda: (called.append(True), hub.abort())) # t.schedule() # let's have a timer somewhere in the future; make sure abort() still works # (for pyevent, its dispatcher() does not exit if there is something scheduled) # XXX pyevent handles this, other hubs do not # hubs.get_hub().schedule_call_global(10000, lambda: (called.append(True), hub.abort())) hubs.get_hub().schedule_call_global(0, lambda: (called.append(True), hub.abort())) hub.default_sleep = lambda: 0.0 hub.switch() assert called assert not hub.running if __name__ == '__main__': main() eventlet-0.30.2/tests/tpool_test.py0000644000076500000240000002646014006212666017777 0ustar temotostaff00000000000000# Copyright (c) 2007, Linden Research, Inc. # Copyright (c) 2007, IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
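#
# Reader's note (not part of the original file): the tests below exercise the
# two public entry points of eventlet.tpool, which runs blocking callables in
# a pool of OS threads so they do not stall the green hub.  A minimal sketch,
# assuming only the API already used by these tests (tpool.execute and
# tpool.Proxy):
#
#     import re
#     from eventlet import tpool
#
#     # execute(): run a single blocking call in a worker thread
#     result = tpool.execute(some_blocking_function, arg)   # hypothetical callable
#
#     # Proxy(): wrap an object so attribute access and method calls are
#     # dispatched to the thread pool transparently
#     green_re = tpool.Proxy(re)
#     pattern = green_re.compile(r'[a-z]+')   # compiled in a worker thread
#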
from __future__ import print_function import gc import random import re import time import eventlet from eventlet import tpool import six import tests one = 1 two = 2 three = 3 none = None def noop(): pass def raise_exception(): raise RuntimeError("hi") class TestTpool(tests.LimitedTestCase): def setUp(self): super(TestTpool, self).setUp() def tearDown(self): tpool.killall() super(TestTpool, self).tearDown() @tests.skip_with_pyevent def test_wrap_tuple(self): my_tuple = (1, 2) prox = tpool.Proxy(my_tuple) self.assertEqual(prox[0], 1) self.assertEqual(prox[1], 2) self.assertEqual(len(my_tuple), 2) @tests.skip_with_pyevent def test_wrap_string(self): my_object = "whatever" prox = tpool.Proxy(my_object) self.assertEqual(str(my_object), str(prox)) self.assertEqual(len(my_object), len(prox)) self.assertEqual(my_object.join(['a', 'b']), prox.join(['a', 'b'])) @tests.skip_with_pyevent def test_wrap_uniterable(self): prox = tpool.Proxy([]) def index(): prox[0] def key(): prox['a'] self.assertRaises(IndexError, index) self.assertRaises(TypeError, key) @tests.skip_with_pyevent def test_wrap_dict(self): my_object = {'a': 1} prox = tpool.Proxy(my_object) self.assertEqual('a', list(prox.keys())[0]) self.assertEqual(1, prox['a']) self.assertEqual(str(my_object), str(prox)) self.assertEqual(repr(my_object), repr(prox)) @tests.skip_with_pyevent def test_wrap_module_class(self): prox = tpool.Proxy(re) self.assertEqual(tpool.Proxy, type(prox)) exp = prox.compile('(.)(.)(.)') self.assertEqual(exp.groups, 3) assert repr(prox.compile) @tests.skip_with_pyevent def test_wrap_eq(self): prox = tpool.Proxy(re) exp1 = prox.compile('.') exp2 = prox.compile(exp1.pattern) self.assertEqual(exp1, exp2) exp3 = prox.compile('/') assert exp1 != exp3 @tests.skip_with_pyevent def test_wrap_ints(self): p = tpool.Proxy(4) assert p == 4 @tests.skip_with_pyevent def test_wrap_hash(self): prox1 = tpool.Proxy('' + 'A') prox2 = tpool.Proxy('A' + '') assert prox1 == 'A' assert 'A' == prox2 # assert prox1 == prox2 FIXME - could __eq__ unwrap rhs if it is other proxy? 
self.assertEqual(hash(prox1), hash(prox2)) proxList = tpool.Proxy([]) self.assertRaises(TypeError, hash, proxList) @tests.skip_with_pyevent def test_wrap_nonzero(self): prox = tpool.Proxy(re) exp1 = prox.compile('.') assert bool(exp1) prox2 = tpool.Proxy([1, 2, 3]) assert bool(prox2) @tests.skip_with_pyevent def test_multiple_wraps(self): prox1 = tpool.Proxy(re) prox2 = tpool.Proxy(re) prox1.compile('.') x2 = prox1.compile('.') del x2 prox2.compile('.') @tests.skip_with_pyevent def test_wrap_getitem(self): prox = tpool.Proxy([0, 1, 2]) self.assertEqual(prox[0], 0) @tests.skip_with_pyevent def test_wrap_setitem(self): prox = tpool.Proxy([0, 1, 2]) prox[1] = 2 self.assertEqual(prox[1], 2) @tests.skip_with_pyevent def test_wrap_iterator(self): self.reset_timeout(2) prox = tpool.Proxy(range(10)) result = [] for i in prox: result.append(i) self.assertEqual(list(range(10)), result) @tests.skip_with_pyevent def test_wrap_iterator2(self): self.reset_timeout(5) # might take a while due to imprecise sleeping def foo(): import time for x in range(2): yield x time.sleep(0.001) counter = [0] def tick(): for i in six.moves.range(20000): counter[0] += 1 if counter[0] % 20 == 0: eventlet.sleep(0.0001) else: eventlet.sleep() gt = eventlet.spawn(tick) previtem = 0 for item in tpool.Proxy(foo()): assert item >= previtem # make sure the tick happened at least a few times so that we know # that our iterations in foo() were actually tpooled assert counter[0] > 10, counter[0] gt.kill() @tests.skip_with_pyevent def test_raising_exceptions(self): prox = tpool.Proxy(re) def nofunc(): prox.never_name_a_function_like_this() self.assertRaises(AttributeError, nofunc) from tests import tpool_test prox = tpool.Proxy(tpool_test) self.assertRaises(RuntimeError, prox.raise_exception) @tests.skip_with_pyevent def test_variable_and_keyword_arguments_with_function_calls(self): import optparse parser = tpool.Proxy(optparse.OptionParser()) parser.add_option('-n', action='store', type='string', dest='n') opts, args = parser.parse_args(["-nfoo"]) self.assertEqual(opts.n, 'foo') @tests.skip_with_pyevent def test_contention(self): from tests import tpool_test prox = tpool.Proxy(tpool_test) pile = eventlet.GreenPile(4) pile.spawn(lambda: self.assertEqual(prox.one, 1)) pile.spawn(lambda: self.assertEqual(prox.two, 2)) pile.spawn(lambda: self.assertEqual(prox.three, 3)) results = list(pile) self.assertEqual(len(results), 3) @tests.skip_with_pyevent def test_timeout(self): blocking = eventlet.patcher.original('time') eventlet.Timeout(0.1, eventlet.Timeout()) try: tpool.execute(blocking.sleep, 0.3) assert False, 'Expected Timeout' except eventlet.Timeout: pass @tests.skip_with_pyevent def test_killall(self): tpool.killall() tpool.setup() @tests.skip_with_pyevent def test_killall_remaining_results(self): semaphore = eventlet.Event() def native_fun(): time.sleep(.5) def gt_fun(): semaphore.send(None) tpool.execute(native_fun) gt = eventlet.spawn(gt_fun) semaphore.wait() tpool.killall() gt.wait() @tests.skip_with_pyevent def test_autowrap(self): x = tpool.Proxy({'a': 1, 'b': 2}, autowrap=(int,)) assert isinstance(x.get('a'), tpool.Proxy) assert not isinstance(x.items(), tpool.Proxy) # attributes as well as callables from tests import tpool_test x = tpool.Proxy(tpool_test, autowrap=(int,)) assert isinstance(x.one, tpool.Proxy) assert not isinstance(x.none, tpool.Proxy) @tests.skip_with_pyevent def test_autowrap_names(self): x = tpool.Proxy({'a': 1, 'b': 2}, autowrap_names=('get',)) assert isinstance(x.get('a'), tpool.Proxy) assert not 
isinstance(x.items(), tpool.Proxy) from tests import tpool_test x = tpool.Proxy(tpool_test, autowrap_names=('one',)) assert isinstance(x.one, tpool.Proxy) assert not isinstance(x.two, tpool.Proxy) @tests.skip_with_pyevent def test_autowrap_both(self): from tests import tpool_test x = tpool.Proxy(tpool_test, autowrap=(int,), autowrap_names=('one',)) assert isinstance(x.one, tpool.Proxy) # violating the abstraction to check that we didn't double-wrap assert not isinstance(x._obj, tpool.Proxy) @tests.skip_with_pyevent def test_callable(self): def wrapped(arg): return arg x = tpool.Proxy(wrapped) self.assertEqual(4, x(4)) # verify that it wraps return values if specified x = tpool.Proxy(wrapped, autowrap_names=('__call__',)) assert isinstance(x(4), tpool.Proxy) self.assertEqual("4", str(x(4))) @tests.skip_with_pyevent def test_callable_iterator(self): def wrapped(arg): yield arg yield arg yield arg x = tpool.Proxy(wrapped, autowrap_names=('__call__',)) for r in x(3): self.assertEqual(3, r) @tests.skip_with_pyevent def test_eventlet_timeout(self): def raise_timeout(): raise eventlet.Timeout() self.assertRaises(eventlet.Timeout, tpool.execute, raise_timeout) @tests.skip_with_pyevent def test_tpool_set_num_threads(self): tpool.set_num_threads(5) self.assertEqual(5, tpool._nthreads) class TpoolLongTests(tests.LimitedTestCase): TEST_TIMEOUT = 60 @tests.skip_with_pyevent def test_a_buncha_stuff(self): assert_ = self.assert_ class Dummy(object): def foo(self, when, token=None): assert_(token is not None) time.sleep(random.random() / 200.0) return token def sender_loop(loopnum): obj = tpool.Proxy(Dummy()) count = 100 for n in six.moves.range(count): eventlet.sleep(random.random() / 200.0) now = time.time() token = loopnum * count + n rv = obj.foo(now, token=token) self.assertEqual(token, rv) eventlet.sleep(random.random() / 200.0) cnt = 10 pile = eventlet.GreenPile(cnt) for i in six.moves.range(cnt): pile.spawn(sender_loop, i) results = list(pile) self.assertEqual(len(results), cnt) tpool.killall() @tests.skip_with_pyevent def test_leakage_from_tracebacks(self): tpool.execute(noop) # get it started gc.collect() initial_objs = len(gc.get_objects()) for i in range(10): self.assertRaises(RuntimeError, tpool.execute, raise_exception) gc.collect() middle_objs = len(gc.get_objects()) # some objects will inevitably be created by the previous loop # now we test to ensure that running the loop an order of # magnitude more doesn't generate additional objects for i in six.moves.range(100): self.assertRaises(RuntimeError, tpool.execute, raise_exception) first_created = middle_objs - initial_objs gc.collect() second_created = len(gc.get_objects()) - middle_objs self.assert_(second_created - first_created < 10, "first loop: %s, second loop: %s" % (first_created, second_created)) tpool.killall() def test_isolate_from_socket_default_timeout(): tests.run_isolated('tpool_isolate_socket_default_timeout.py', timeout=5) def test_exception_leak(): tests.run_isolated('tpool_exception_leak.py') eventlet-0.30.2/tests/websocket_new_test.py0000644000076500000240000004774514006212666021512 0ustar temotostaff00000000000000import errno import struct import re import eventlet from eventlet import event from eventlet import websocket from eventlet.green import httplib from eventlet.green import socket import six import tests.wsgi_test # demo app def handle(ws): if ws.path == '/echo': while True: m = ws.wait() if m is None: break ws.send(m) elif ws.path == '/range': for i in range(10): ws.send("msg %d" % i) eventlet.sleep(0.01) elif 
ws.path == '/error': # some random socket error that we shouldn't normally get raise socket.error(errno.ENOTSOCK) else: ws.close() wsapp = websocket.WebSocketWSGI(handle) class TestWebSocket(tests.wsgi_test._TestBase): TEST_TIMEOUT = 5 def set_site(self): self.site = wsapp def test_incomplete_headers_13(self): headers = dict(kv.split(': ') for kv in [ "Upgrade: websocket", # NOTE: intentionally no connection header "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') # Now, miss off key headers = dict(kv.split(': ') for kv in [ "Upgrade: websocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') # No Upgrade now headers = dict(kv.split(': ') for kv in [ "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') def test_correct_upgrade_request_13(self): for http_connection in ['Upgrade', 'UpGrAdE', 'keep-alive, Upgrade']: connect = [ "GET /echo HTTP/1.1", "Upgrade: websocket", "Connection: %s" % http_connection, "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) result = sock.recv(1024) # The server responds the correct Websocket handshake print('Connection string: %r' % http_connection) self.assertEqual(result, six.b('\r\n'.join([ 'HTTP/1.1 101 Switching Protocols', 'Upgrade: websocket', 'Connection: Upgrade', 'Sec-WebSocket-Accept: ywSyWXCPNsDxLrQdQrn5RFNRfBU=\r\n\r\n', ]))) def test_send_recv_13(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: websocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!\x01') ws.send(u'hello world again!') assert ws.wait() == b'hello world!\x01' assert ws.wait() == u'hello world again!' 
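        # Note on the handshake constants used throughout these tests (added
        # commentary, not part of the original test): per RFC 6455 the server
        # derives Sec-WebSocket-Accept from the client's Sec-WebSocket-Key by
        # appending a fixed GUID, SHA-1 hashing, and base64-encoding:
        #
        #     import base64, hashlib
        #     key = b'd9MXuOzlVQ0h+qRllvSCIg=='
        #     guid = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
        #     accept = base64.b64encode(hashlib.sha1(key + guid).digest())
        #     # -> b'ywSyWXCPNsDxLrQdQrn5RFNRfBU=' (the value asserted above)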
ws.close() eventlet.sleep(0.01) def test_breaking_the_connection_13(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /echo HTTP/1.1", "Upgrade: websocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() assert not error_detected[0] def test_client_closing_connection_13(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /echo HTTP/1.1", "Upgrade: websocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) sock.recv(1024) # get the headers closeframe = struct.pack('!BBIH', 1 << 7 | 8, 1 << 7 | 2, 0, 1000) sock.sendall(closeframe) # "Close the connection" packet. done_with_request.wait() assert not error_detected[0] def test_client_invalid_packet_13(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /echo HTTP/1.1", "Upgrade: websocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) sock.recv(1024) # get the headers sock.sendall(b'\x07\xff') # Weird packet. 
done_with_request.wait() assert not error_detected[0] class TestWebSocketWithCompression(tests.wsgi_test._TestBase): TEST_TIMEOUT = 5 def set_site(self): self.site = wsapp def setUp(self): super(TestWebSocketWithCompression, self).setUp() self.connect = '\r\n'.join([ "GET /echo HTTP/1.1", "Upgrade: websocket", "Connection: upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Version: 13", "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", "Sec-WebSocket-Extensions: %s", '\r\n' ]) self.handshake_re = re.compile(six.b('\r\n'.join([ 'HTTP/1.1 101 Switching Protocols', 'Upgrade: websocket', 'Connection: Upgrade', 'Sec-WebSocket-Accept: ywSyWXCPNsDxLrQdQrn5RFNRfBU=', 'Sec-WebSocket-Extensions: (.+)' '\r\n', ]))) @staticmethod def get_deflated_reply(ws): msg = ws._recv_frame(None) msg.decompressor = None return msg.getvalue() def test_accept_basic_deflate_ext_13(self): for extension in [ 'permessage-deflate', 'PeRMessAGe-dEFlaTe', ]: sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extension)) result = sock.recv(1024) # The server responds the correct Websocket handshake # print('Extension offer: %r' % extension) match = re.match(self.handshake_re, result) assert match is not None assert len(match.groups()) == 1 def test_accept_deflate_ext_context_takeover_13(self): for extension in [ 'permessage-deflate;CLient_No_conteXT_TAkeOver', 'permessage-deflate; SerVER_No_conteXT_TAkeOver', 'permessage-deflate; server_no_context_takeover; client_no_context_takeover', ]: sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extension)) result = sock.recv(1024) # The server responds the correct Websocket handshake # print('Extension offer: %r' % extension) match = re.match(self.handshake_re, result) assert match is not None assert len(match.groups()) == 1 offered_ext_parts = (ex.strip().lower() for ex in extension.split(';')) accepted_ext_parts = match.groups()[0].decode().split('; ') assert all(oep in accepted_ext_parts for oep in offered_ext_parts) def test_accept_deflate_ext_window_max_bits_13(self): for extension_string, vals in [ ('permessage-deflate; client_max_window_bits', [15]), ('permessage-deflate; Server_Max_Window_Bits = 11', [11]), ('permessage-deflate; server_max_window_bits; ' 'client_max_window_bits=9', [15, 9]) ]: sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extension_string)) result = sock.recv(1024) # The server responds the correct Websocket handshake # print('Extension offer: %r' % extension_string) match = re.match(self.handshake_re, result) assert match is not None assert len(match.groups()) == 1 offered_parts = [part.strip().lower() for part in extension_string.split(';')] offered_parts_names = [part.split('=')[0].strip() for part in offered_parts] offered_parts_dict = dict(zip(offered_parts_names[1:], vals)) accepted_ext_parts = match.groups()[0].decode().split('; ') assert accepted_ext_parts[0] == 'permessage-deflate' for param, val in (part.split('=') for part in accepted_ext_parts[1:]): assert int(val) == offered_parts_dict[param] def test_reject_max_window_bits_out_of_range_13(self): extension_string = ('permessage-deflate; client_max_window_bits=7,' 'permessage-deflate; server_max_window_bits=16, ' 'permessage-deflate; client_max_window_bits=16; ' 'server_max_window_bits=7, ' 'permessage-deflate') sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extension_string)) result = sock.recv(1024) # The server responds the correct 
Websocket handshake # print('Extension offer: %r' % extension_string) match = re.match(self.handshake_re, result) assert match.groups()[0] == b'permessage-deflate' def test_server_compress_with_context_takeover_13(self): extensions_string = 'permessage-deflate; client_no_context_takeover;' extensions = {'permessage-deflate': { 'client_no_context_takeover': True, 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extensions_string)) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) # Deflated values taken from Section 7.2.3 of RFC 7692 # https://tools.ietf.org/html/rfc7692#section-7.2.3 ws.send(b'Hello') msg1 = self.get_deflated_reply(ws) assert msg1 == b'\xf2\x48\xcd\xc9\xc9\x07\x00' ws.send(b'Hello') msg2 = self.get_deflated_reply(ws) assert msg2 == b'\xf2\x00\x11\x00\x00' ws.close() eventlet.sleep(0.01) def test_server_compress_no_context_takeover_13(self): extensions_string = 'permessage-deflate; server_no_context_takeover;' extensions = {'permessage-deflate': { 'client_no_context_takeover': False, 'server_no_context_takeover': True}} sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extensions_string)) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) masked_msg1 = ws._pack_message(b'Hello', masked=True) ws._send(masked_msg1) masked_msg2 = ws._pack_message(b'Hello', masked=True) ws._send(masked_msg2) # Verify that client uses context takeover by checking # that the second message assert len(masked_msg2) < len(masked_msg1) # Verify that server drops context between messages # Deflated values taken from Section 7.2.3 of RFC 7692 # https://tools.ietf.org/html/rfc7692#section-7.2.3 reply_msg1 = self.get_deflated_reply(ws) assert reply_msg1 == b'\xf2\x48\xcd\xc9\xc9\x07\x00' reply_msg2 = self.get_deflated_reply(ws) assert reply_msg2 == b'\xf2\x48\xcd\xc9\xc9\x07\x00' def test_client_compress_with_context_takeover_13(self): extensions = {'permessage-deflate': { 'client_no_context_takeover': False, 'server_no_context_takeover': True}} ws = websocket.RFC6455WebSocket(None, {}, client=True, extensions=extensions) # Deflated values taken from Section 7.2.3 of RFC 7692 # modified opcode to Binary instead of Text # https://tools.ietf.org/html/rfc7692#section-7.2.3 packed_msg_1 = ws._pack_message(b'Hello', masked=False) assert packed_msg_1 == b'\xc2\x07\xf2\x48\xcd\xc9\xc9\x07\x00' packed_msg_2 = ws._pack_message(b'Hello', masked=False) assert packed_msg_2 == b'\xc2\x05\xf2\x00\x11\x00\x00' eventlet.sleep(0.01) def test_client_compress_no_context_takeover_13(self): extensions = {'permessage-deflate': { 'client_no_context_takeover': True, 'server_no_context_takeover': False}} ws = websocket.RFC6455WebSocket(None, {}, client=True, extensions=extensions) # Deflated values taken from Section 7.2.3 of RFC 7692 # modified opcode to Binary instead of Text # https://tools.ietf.org/html/rfc7692#section-7.2.3 packed_msg_1 = ws._pack_message(b'Hello', masked=False) assert packed_msg_1 == b'\xc2\x07\xf2\x48\xcd\xc9\xc9\x07\x00' packed_msg_2 = ws._pack_message(b'Hello', masked=False) assert packed_msg_2 == b'\xc2\x07\xf2\x48\xcd\xc9\xc9\x07\x00' def test_compressed_send_recv_13(self): extensions_string = 'permessage-deflate' extensions = {'permessage-deflate': { 'client_no_context_takeover': False, 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extensions_string)) 
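        # Added commentary (not part of the original test): with
        # client_no_context_takeover negotiated, the client's compressor
        # resets its LZ77 window after every message, while the server keeps
        # its window between messages ("context takeover").  That is why the
        # two deflated server replies below differ: the second reply can
        # back-reference the first and comes out shorter
        # (b'\xf2\x00\x11\x00\x00' versus b'\xf2\x48\xcd\xc9\xc9\x07\x00').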
sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') ws.send(u'hello world again!') assert ws.wait() == b'hello world!' assert ws.wait() == u'hello world again!' ws.close() eventlet.sleep(0.01) def test_send_uncompressed_msg_13(self): extensions_string = 'permessage-deflate' extensions = {'permessage-deflate': { 'client_no_context_takeover': False, 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extensions_string)) sock.recv(1024) # Send without using deflate, having rsv1 unset ws = websocket.RFC6455WebSocket(sock, {}, client=True) ws.send(b'Hello') # Adding extensions to recognise deflated response ws.extensions = extensions assert ws.wait() == b'Hello' ws.close() eventlet.sleep(0.01) def test_compressed_send_recv_client_no_context_13(self): extensions_string = 'permessage-deflate; client_no_context_takeover' extensions = {'permessage-deflate': { 'client_no_context_takeover': True, 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extensions_string)) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') ws.send(u'hello world again!') assert ws.wait() == b'hello world!' assert ws.wait() == u'hello world again!' ws.close() eventlet.sleep(0.01) def test_compressed_send_recv_server_no_context_13(self): extensions_string = 'permessage-deflate; server_no_context_takeover' extensions = {'permessage-deflate': { 'client_no_context_takeover': False, 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extensions_string)) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') ws.send(u'hello world again!') assert ws.wait() == b'hello world!' assert ws.wait() == u'hello world again!' ws.close() eventlet.sleep(0.01) def test_compressed_send_recv_both_no_context_13(self): extensions_string = ('permessage-deflate;' ' server_no_context_takeover; client_no_context_takeover') extensions = {'permessage-deflate': { 'client_no_context_takeover': True, 'server_no_context_takeover': True}} sock = eventlet.connect(self.server_addr) sock.sendall(six.b(self.connect % extensions_string)) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') ws.send(u'hello world again!') assert ws.wait() == b'hello world!' assert ws.wait() == u'hello world again!' 
ws.close() eventlet.sleep(0.01) eventlet-0.30.2/tests/websocket_test.py0000644000076500000240000005331414006212666020626 0ustar temotostaff00000000000000import errno import socket import sys import eventlet from eventlet import event from eventlet import greenio from eventlet.green import httplib import six from eventlet.websocket import WebSocket, WebSocketWSGI import tests from tests import mock import tests.wsgi_test # demo app def handle(ws): if ws.path == '/echo': while True: m = ws.wait() if m is None: break ws.send(m) elif ws.path == '/range': for i in range(10): ws.send("msg %d" % i) eventlet.sleep(0.01) elif ws.path == '/error': # some random socket error that we shouldn't normally get raise socket.error(errno.ENOTSOCK) else: ws.close() wsapp = WebSocketWSGI(handle) class TestWebSocket(tests.wsgi_test._TestBase): TEST_TIMEOUT = 5 def set_site(self): self.site = wsapp def test_incorrect_headers(self): http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo") response = http.getresponse() assert response.status == 400 def test_incomplete_headers_75(self): headers = dict(kv.split(': ') for kv in [ "Upgrade: WebSocket", # NOTE: intentionally no connection header "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') def test_incomplete_headers_76(self): # First test: Missing Connection: headers = dict(kv.split(': ') for kv in [ "Upgrade: WebSocket", # NOTE: intentionally no connection header "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') # Now, miss off key2 headers = dict(kv.split(': ') for kv in [ "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", # NOTE: Intentionally no Key2 header ]) http = httplib.HTTPConnection(*self.server_addr) http.request("GET", "/echo", headers=headers) resp = http.getresponse() self.assertEqual(resp.status, 400) self.assertEqual(resp.getheader('connection'), 'close') self.assertEqual(resp.read(), b'') def test_correct_upgrade_request_75(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) result = sock.recv(1024) # The server responds the correct Websocket handshake self.assertEqual(result, six.b('\r\n'.join([ 'HTTP/1.1 101 Web Socket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'WebSocket-Origin: http://%s:%s' % self.server_addr, 'WebSocket-Location: ws://%s:%s/echo\r\n\r\n' % self.server_addr, ]))) def test_correct_upgrade_request_76(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 
@1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) result = sock.recv(1024) # The server responds the correct Websocket handshake self.assertEqual(result, six.b('\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ws://%s:%s/echo\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, ]))) def test_query_string(self): # verify that the query string comes out the other side unscathed connect = [ "GET /echo?query_string HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) result = sock.recv(1024) self.assertEqual(result, six.b('\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ' 'ws://%s:%s/echo?query_string\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, ]))) def test_empty_query_string(self): # verify that a single trailing ? doesn't get nuked connect = [ "GET /echo? HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) result = sock.recv(1024) self.assertEqual(result, six.b('\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ws://%s:%s/echo?\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, ]))) def test_sending_messages_to_websocket_75(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) sock.recv(1024) sock.sendall(b'\x00hello\xFF') result = sock.recv(1024) self.assertEqual(result, b'\x00hello\xff') sock.sendall(b'\x00start') eventlet.sleep(0.001) sock.sendall(b' end\xff') result = sock.recv(1024) self.assertEqual(result, b'\x00start end\xff') sock.shutdown(socket.SHUT_RDWR) sock.close() eventlet.sleep(0.01) def test_sending_messages_to_websocket_76(self): connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) sock.recv(1024) sock.sendall(b'\x00hello\xFF') result = sock.recv(1024) self.assertEqual(result, b'\x00hello\xff') sock.sendall(b'\x00start') eventlet.sleep(0.001) sock.sendall(b' end\xff') result = sock.recv(1024) self.assertEqual(result, b'\x00start end\xff') 
sock.shutdown(socket.SHUT_RDWR) sock.close() eventlet.sleep(0.01) def test_getting_messages_from_websocket_75(self): connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') msgs = [result.strip(b'\x00\xff')] cnt = 10 while cnt: msgs.append(sock.recv(20).strip(b'\x00\xff')) cnt -= 1 # Last item in msgs is an empty string self.assertEqual(msgs[:-1], [six.b('msg %d' % i) for i in range(10)]) def test_getting_messages_from_websocket_76(self): connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') msgs = [result[16:].strip(b'\x00\xff')] cnt = 10 while cnt: msgs.append(sock.recv(20).strip(b'\x00\xff')) cnt -= 1 # Last item in msgs is an empty string self.assertEqual(msgs[:-1], [six.b('msg %d' % i) for i in range(10)]) def test_breaking_the_connection_75(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() assert not error_detected[0] def test_breaking_the_connection_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /range HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() assert not error_detected[0] def test_client_closing_connection_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 
46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) sock.recv(1024) # get the headers sock.sendall(b'\xff\x00') # "Close the connection" packet. done_with_request.wait() assert not error_detected[0] def test_client_invalid_packet_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) sock.recv(1024) # get the headers sock.sendall(b'\xef\x00') # Weird packet. done_with_request.wait() assert error_detected[0] def test_server_closing_connect_76(self): connect = [ "GET / HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') # The remote server should have immediately closed the connection. self.assertEqual(result[16:], b'\xff\x00') def test_app_socket_errors_75(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /error HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) sock.recv(1024) done_with_request.wait() assert error_detected[0] def test_app_socket_errors_76(self): error_detected = [False] done_with_request = event.Event() site = self.site def error_detector(environ, start_response): try: try: return site(environ, start_response) except: error_detected[0] = True raise finally: done_with_request.send(True) self.site = error_detector self.spawn_server() connect = [ "GET /error HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) sock.recv(1024) done_with_request.wait() assert error_detected[0] def test_close_idle(self): pool = eventlet.GreenPool() # use log=stderr when test runner can capture it self.spawn_server(custom_pool=pool, log=sys.stdout) connect = ( 'GET /echo HTTP/1.1', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Host: %s:%s' % self.server_addr, 'Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Key1: 4 
@1 46546xW%0l 1 5', 'Sec-WebSocket-Key2: 12998 5 Y3 1 .P00', ) sock = eventlet.connect(self.server_addr) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) sock.recv(1024) sock.sendall(b'\x00hello\xff') result = sock.recv(1024) assert result, b'\x00hello\xff' self.killer.kill(KeyboardInterrupt) with eventlet.Timeout(1): pool.waitall() class TestWebSocketSSL(tests.wsgi_test._TestBase): def set_site(self): self.site = wsapp @tests.skip_if_no_ssl def test_ssl_sending_messages(self): s = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)), certfile=tests.certificate_file, keyfile=tests.private_key_file, server_side=True) self.spawn_server(sock=s) connect = [ "GET /echo HTTP/1.1", "Upgrade: WebSocket", "Connection: Upgrade", "Host: %s:%s" % self.server_addr, "Origin: http://%s:%s" % self.server_addr, "Sec-WebSocket-Protocol: ws", "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5", "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.wrap_ssl(eventlet.connect(self.server_addr)) sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) first_resp = b'' while b'\r\n\r\n' not in first_resp: first_resp += sock.recv() print('resp now:') print(first_resp) # make sure it sets the wss: protocol on the location header loc_line = [x for x in first_resp.split(b"\r\n") if x.lower().startswith(b'sec-websocket-location')][0] expect_wss = ('wss://%s:%s' % self.server_addr).encode() assert expect_wss in loc_line, "Expecting wss protocol in location: %s" % loc_line sock.sendall(b'\x00hello\xFF') result = sock.recv(1024) self.assertEqual(result, b'\x00hello\xff') sock.sendall(b'\x00start') eventlet.sleep(0.001) sock.sendall(b' end\xff') result = sock.recv(1024) self.assertEqual(result, b'\x00start end\xff') greenio.shutdown_safe(sock) sock.close() eventlet.sleep(0.01) class TestWebSocketObject(tests.LimitedTestCase): def setUp(self): self.mock_socket = s = mock.Mock() self.environ = env = dict(HTTP_ORIGIN='http://localhost', HTTP_WEBSOCKET_PROTOCOL='ws', PATH_INFO='test') self.test_ws = WebSocket(s, env) super(TestWebSocketObject, self).setUp() def test_recieve(self): ws = self.test_ws ws.socket.recv.return_value = b'\x00hello\xFF' self.assertEqual(ws.wait(), 'hello') self.assertEqual(ws._buf, b'') self.assertEqual(len(ws._msgs), 0) ws.socket.recv.return_value = b'' self.assertEqual(ws.wait(), None) self.assertEqual(ws._buf, b'') self.assertEqual(len(ws._msgs), 0) def test_send_to_ws(self): ws = self.test_ws ws.send(u'hello') assert ws.socket.sendall.called_with("\x00hello\xFF") ws.send(10) assert ws.socket.sendall.called_with("\x0010\xFF") def test_close_ws(self): ws = self.test_ws ws.close() assert ws.socket.shutdown.called_with(True) eventlet-0.30.2/tests/wsgi_test.py0000644000076500000240000023347514006212666017621 0ustar temotostaff00000000000000# coding: utf-8 import cgi import collections import errno import os import shutil import signal import socket import sys import tempfile import traceback import eventlet from eventlet import debug from eventlet import event from eventlet import greenio from eventlet import greenthread from eventlet import support from eventlet import tpool from eventlet import wsgi from eventlet.green import socket as greensocket from eventlet.green import ssl from eventlet.support import bytes_to_str import six from six.moves.urllib import parse import tests certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt') private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key') HttpReadResult = collections.namedtuple( 
'HttpReadResult', 'status headers_lower body headers_original') def hello_world(env, start_response): if env['PATH_INFO'] == 'notexist': start_response('404 Not Found', [('Content-type', 'text/plain')]) return [b"not found"] start_response('200 OK', [('Content-type', 'text/plain')]) return [b"hello world"] def chunked_app(env, start_response): start_response('200 OK', [('Content-type', 'text/plain')]) yield b"this" yield b"is" yield b"chunked" def chunked_fail_app(environ, start_response): """http://rhodesmill.org/brandon/2013/chunked-wsgi/ """ headers = [('Content-Type', 'text/plain')] start_response('200 OK', headers) # We start streaming data just fine. yield b"The dwarves of yore made mighty spells," yield b"While hammers fell like ringing bells" # Then the back-end fails! try: 1 / 0 except Exception: start_response('500 Error', headers, sys.exc_info()) return # So rest of the response data is not available. yield b"In places deep, where dark things sleep," yield b"In hollow halls beneath the fells." def big_chunks(env, start_response): start_response('200 OK', [('Content-type', 'text/plain')]) line = b'a' * 8192 for x in range(10): yield line def use_write(env, start_response): if env['PATH_INFO'] == '/a': write = start_response('200 OK', [('Content-type', 'text/plain'), ('Content-Length', '5')]) write(b'abcde') if env['PATH_INFO'] == '/b': write = start_response('200 OK', [('Content-type', 'text/plain')]) write(b'abcde') return [] def chunked_post(env, start_response): start_response('200 OK', [('Content-type', 'text/plain')]) if env['PATH_INFO'] == '/a': return [env['wsgi.input'].read()] elif env['PATH_INFO'] == '/b': return [x for x in iter(lambda: env['wsgi.input'].read(4096), b'')] elif env['PATH_INFO'] == '/c': return [x for x in iter(lambda: env['wsgi.input'].read(1), b'')] def already_handled(env, start_response): start_response('200 OK', [('Content-type', 'text/plain')]) return wsgi.ALREADY_HANDLED class Site(object): def __init__(self): self.application = hello_world def __call__(self, env, start_response): return self.application(env, start_response) class IterableApp(object): def __init__(self, send_start_response=False, return_val=wsgi.ALREADY_HANDLED): self.send_start_response = send_start_response self.return_val = return_val self.env = {} def __call__(self, env, start_response): self.env = env if self.send_start_response: start_response('200 OK', [('Content-type', 'text/plain')]) return self.return_val class IterableSite(Site): def __call__(self, env, start_response): it = self.application(env, start_response) for i in it: yield i CONTENT_LENGTH = 'content-length' def recvall(sock): result = b'' while True: chunk = sock.recv(16 << 10) if chunk == b'': return result result += chunk class ConnectionClosed(Exception): pass def send_expect_close(sock, buf): # Some tests will induce behavior that causes the remote end to # close the connection before all of the data has been written. # With small kernel buffer sizes, this can cause an EPIPE error. # Since the test expects an early close, this can be ignored. 
try: sock.sendall(buf) except socket.error as exc: if support.get_errno(exc) != errno.EPIPE: raise def read_http(sock): fd = sock.makefile('rb') try: response_line = bytes_to_str(fd.readline().rstrip(b'\r\n')) except socket.error as exc: # TODO find out whether 54 is ok here or not, I see it when running tests # on Python 3 if support.get_errno(exc) in (10053, 54): raise ConnectionClosed raise if not response_line: raise ConnectionClosed(response_line) header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line) headers_original = {} headers_lower = {} for x in header_lines: x = x.strip() if not x: continue key, value = bytes_to_str(x, encoding='latin1').split(':', 1) key = key.rstrip() value = value.lstrip() key_lower = key.lower() # FIXME: Duplicate headers are allowed as per HTTP RFC standard, # the client and/or intermediate proxies are supposed to treat them # as a single header with values concatenated using space (' ') delimiter. assert key_lower not in headers_lower, "header duplicated: {0}".format(key) headers_original[key] = value headers_lower[key_lower] = value content_length_str = headers_lower.get(CONTENT_LENGTH.lower(), '') if content_length_str: num = int(content_length_str) body = fd.read(num) else: # read until EOF body = fd.read() result = HttpReadResult( status=response_line, headers_lower=headers_lower, body=body, headers_original=headers_original) return result class _TestBase(tests.LimitedTestCase): def setUp(self): super(_TestBase, self).setUp() self.site = Site() self.killer = None self.set_site() self.spawn_server() def tearDown(self): greenthread.kill(self.killer) eventlet.sleep(0) super(_TestBase, self).tearDown() def spawn_server(self, **kwargs): """Spawns a new wsgi server with the given arguments using :meth:`spawn_thread`. Sets `self.server_addr` to (host, port) tuple suitable for `socket.connect`. """ self.logfile = six.StringIO() new_kwargs = dict(max_size=128, log=self.logfile, site=self.site) new_kwargs.update(kwargs) if 'sock' not in new_kwargs: new_kwargs['sock'] = eventlet.listen(('localhost', 0)) self.server_addr = new_kwargs['sock'].getsockname() self.spawn_thread(wsgi.server, **new_kwargs) def spawn_thread(self, target, **kwargs): """Spawns a new greenthread using specified target and arguments. Kills any previously-running server and sets self.killer to the greenthread running the target. 
""" eventlet.sleep(0) # give previous server a chance to start if self.killer: greenthread.kill(self.killer) self.killer = eventlet.spawn(target, **kwargs) def set_site(self): raise NotImplementedError class TestHttpd(_TestBase): def set_site(self): self.site = Site() def test_001_server(self): sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n') result = recvall(sock) # The server responds with the maximum version it supports assert result.startswith(b'HTTP'), result assert result.endswith(b'hello world'), result def test_002_keepalive(self): sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') read_http(sock) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') read_http(sock) def test_004_close_keepalive(self): sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result1 = read_http(sock) assert result1.status == 'HTTP/1.1 200 OK' sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result2 = read_http(sock) assert result2.status == 'HTTP/1.1 200 OK' assert result2.headers_lower['connection'] == 'close' sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') self.assertRaises(ConnectionClosed, read_http, sock) def test_006_reject_long_urls(self): sock = eventlet.connect(self.server_addr) path_parts = [] for ii in range(3000): path_parts.append('path') path = '/'.join(path_parts) request = 'GET /%s HTTP/1.0\r\nHost: localhost\r\n\r\n' % path send_expect_close(sock, request.encode()) fd = sock.makefile('rb') result = fd.readline() if result: # windows closes the socket before the data is flushed, # so we never get anything back status = result.split(b' ')[1] self.assertEqual(status, b'414') fd.close() def test_007_get_arg(self): # define a new handler that does a get_arg as well as a read_body def new_app(env, start_response): body = bytes_to_str(env['wsgi.input'].read()) a = parse.parse_qs(body).get('a', [1])[0] start_response('200 OK', [('Content-type', 'text/plain')]) return [six.b('a is %s, body is %s' % (a, body))] self.site.application = new_app sock = eventlet.connect(self.server_addr) request = b'\r\n'.join(( b'POST / HTTP/1.0', b'Host: localhost', b'Content-Length: 3', b'', b'a=a')) sock.sendall(request) # send some junk after the actual request sock.sendall(b'01234567890123456789') result = read_http(sock) self.assertEqual(result.body, b'a is a, body is a=a') def test_008_correctresponse(self): sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result_200 = read_http(sock) sock.sendall(b'GET /notexist HTTP/1.1\r\nHost: localhost\r\n\r\n') read_http(sock) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result_test = read_http(sock) self.assertEqual(result_200.status, result_test.status) def test_009_chunked_response(self): self.site.application = chunked_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') assert b'Transfer-Encoding: chunked' in recvall(sock) def test_010_no_chunked_http_1_0(self): self.site.application = chunked_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n') assert b'Transfer-Encoding: chunked' not in recvall(sock) def test_011_multiple_chunks(self): self.site.application = big_chunks sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write(b'GET / 
HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') fd.flush() headers = b'' while True: line = fd.readline() if line == b'\r\n': break else: headers += line assert b'Transfer-Encoding: chunked' in headers chunks = 0 chunklen = int(fd.readline(), 16) while chunklen: chunks += 1 fd.read(chunklen) fd.readline() # CRLF chunklen = int(fd.readline(), 16) assert chunks > 1 response = fd.read() # Require a CRLF to close the message body self.assertEqual(response, b'\r\n') def test_partial_writes_are_handled(self): # https://github.com/eventlet/eventlet/issues/295 # Eventlet issue: "Python 3: wsgi doesn't handle correctly partial # write of socket send() when using writelines()". # # The bug was caused by the default writelines() implementaiton # (used by the wsgi module) which doesn't check if write() # successfully completed sending *all* data therefore data could be # lost and the client could be left hanging forever. # # Switching wsgi wfile to buffered mode fixes the issue. # # Related CPython issue: "Raw I/O writelines() broken", # http://bugs.python.org/issue26292 # # Custom accept() and send() in order to simulate a connection that # only sends one byte at a time so that any code that doesn't handle # partial writes correctly has to fail. listen_socket = eventlet.listen(('localhost', 0)) original_accept = listen_socket.accept def accept(): connection, address = original_accept() original_send = connection.send def send(b, *args): b = b[:1] return original_send(b, *args) connection.send = send return connection, address listen_socket.accept = accept def application(env, start_response): # Sending content-length is important here so that the client knows # exactly how many bytes does it need to wait for. start_response('200 OK', [('Content-length', 3)]) yield 'asd' self.spawn_server(sock=listen_socket) self.site.application = application sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') # This would previously hang forever result = read_http(sock) assert result.body == b'asd' @tests.skip_if_no_ssl def test_012_ssl_server(self): def wsgi_app(environ, start_response): start_response('200 OK', {}) return [environ['wsgi.input'].read()] certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt') private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key') server_sock = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)), certfile=certificate_file, keyfile=private_key_file, server_side=True) self.spawn_server(sock=server_sock, site=wsgi_app) sock = eventlet.connect(self.server_addr) sock = eventlet.wrap_ssl(sock) sock.write( b'POST /foo HTTP/1.1\r\nHost: localhost\r\n' b'Connection: close\r\nContent-length:3\r\n\r\nabc') result = recvall(sock) assert result.endswith(b'abc') @tests.skip_if_no_ssl def test_013_empty_return(self): def wsgi_app(environ, start_response): start_response("200 OK", []) return [b""] certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt') private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key') server_sock = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)), certfile=certificate_file, keyfile=private_key_file, server_side=True) self.spawn_server(sock=server_sock, site=wsgi_app) sock = eventlet.connect(('localhost', server_sock.getsockname()[1])) sock = eventlet.wrap_ssl(sock) sock.write(b'GET /foo HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result = recvall(sock) assert result[-4:] == b'\r\n\r\n' def 
test_014_chunked_post(self): self.site.application = chunked_post sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write('PUT /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' 'Transfer-Encoding: chunked\r\n\r\n' '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode()) fd.flush() while True: if fd.readline() == b'\r\n': break response = fd.read() assert response == b'oh hai', 'invalid response %s' % response sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write('PUT /b HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' 'Transfer-Encoding: chunked\r\n\r\n' '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode()) fd.flush() while True: if fd.readline() == b'\r\n': break response = fd.read() assert response == b'oh hai', 'invalid response %s' % response sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write('PUT /c HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' 'Transfer-Encoding: chunked\r\n\r\n' '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode()) fd.flush() while True: if fd.readline() == b'\r\n': break response = fd.read(8192) assert response == b'oh hai', 'invalid response %s' % response def test_015_write(self): self.site.application = use_write sock = eventlet.connect(self.server_addr) sock.sendall(b'GET /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result1 = read_http(sock) assert 'content-length' in result1.headers_lower sock = eventlet.connect(self.server_addr) sock.sendall(b'GET /b HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result2 = read_http(sock) assert 'transfer-encoding' in result2.headers_lower assert result2.headers_lower['transfer-encoding'] == 'chunked' def test_016_repeated_content_length(self): """content-length header was being doubled up if it was set in start_response and could also be inferred from the iterator """ def wsgi_app(environ, start_response): start_response('200 OK', [('Content-Length', '7')]) return [b'testing'] self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write(b'GET /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') fd.flush() header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line) self.assertEqual(1, len( [l for l in header_lines if l.lower().startswith(b'content-length')])) @tests.skip_if_no_ssl def test_017_ssl_zeroreturnerror(self): def server(sock, site, log): try: serv = wsgi.Server(sock, sock.getsockname(), site, log) client_socket, addr = sock.accept() serv.process_request([addr, client_socket, wsgi.STATE_IDLE]) return True except Exception: traceback.print_exc() return False def wsgi_app(environ, start_response): start_response('200 OK', []) return [environ['wsgi.input'].read()] certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt') private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key') sock = eventlet.wrap_ssl( eventlet.listen(('localhost', 0)), certfile=certificate_file, keyfile=private_key_file, server_side=True) server_coro = eventlet.spawn(server, sock, wsgi_app, self.logfile) client = eventlet.connect(('localhost', sock.getsockname()[1])) client = eventlet.wrap_ssl(client) client.write(b'X') # non-empty payload so that SSL handshake occurs greenio.shutdown_safe(client) client.close() success = server_coro.wait() assert success def test_018_http_10_keepalive(self): # verify that if an http/1.0 client sends connection: keep-alive # that we don't close the connection sock = 
eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n') result1 = read_http(sock) assert 'connection' in result1.headers_lower self.assertEqual('keep-alive', result1.headers_lower['connection']) # repeat request to verify connection is actually still open sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n') result2 = read_http(sock) assert 'connection' in result2.headers_lower self.assertEqual('keep-alive', result2.headers_lower['connection']) sock.close() def test_019_fieldstorage_compat(self): def use_fieldstorage(environ, start_response): cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ) start_response('200 OK', [('Content-type', 'text/plain')]) return [b'hello!'] self.site.application = use_fieldstorage sock = eventlet.connect(self.server_addr) sock.sendall(b'POST / HTTP/1.1\r\n' b'Host: localhost\r\n' b'Connection: close\r\n' b'Transfer-Encoding: chunked\r\n\r\n' b'2\r\noh\r\n' b'4\r\n hai\r\n0\r\n\r\n') assert b'hello!' in recvall(sock) def test_020_x_forwarded_for(self): request_bytes = ( b'GET / HTTP/1.1\r\nHost: localhost\r\n' + b'X-Forwarded-For: 1.2.3.4, 5.6.7.8\r\n\r\n' ) sock = eventlet.connect(self.server_addr) sock.sendall(request_bytes) sock.recv(1024) sock.close() assert '1.2.3.4,5.6.7.8,127.0.0.1' in self.logfile.getvalue() # turning off the option should work too self.logfile = six.StringIO() self.spawn_server(log_x_forwarded_for=False) sock = eventlet.connect(self.server_addr) sock.sendall(request_bytes) sock.recv(1024) sock.close() assert '1.2.3.4' not in self.logfile.getvalue() assert '5.6.7.8' not in self.logfile.getvalue() assert '127.0.0.1' in self.logfile.getvalue() def test_socket_remains_open(self): greenthread.kill(self.killer) server_sock = eventlet.listen(('localhost', 0)) server_sock_2 = server_sock.dup() self.spawn_server(sock=server_sock_2) # do a single req/response to verify it's up sock = eventlet.connect(server_sock.getsockname()) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n') result = sock.recv(1024) assert result.startswith(b'HTTP'), result assert result.endswith(b'hello world'), result # shut down the server and verify the server_socket fd is still open, # but the actual socketobject passed in to wsgi.server is closed greenthread.kill(self.killer) eventlet.sleep(0) # make the kill go through try: server_sock_2.accept() # shouldn't be able to use this one anymore except socket.error as exc: self.assertEqual(support.get_errno(exc), errno.EBADF) self.spawn_server(sock=server_sock) sock = eventlet.connect(server_sock.getsockname()) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n') result = sock.recv(1024) assert result.startswith(b'HTTP'), result assert result.endswith(b'hello world'), result def test_021_environ_clobbering(self): def clobberin_time(environ, start_response): for environ_var in [ 'wsgi.version', 'wsgi.url_scheme', 'wsgi.input', 'wsgi.errors', 'wsgi.multithread', 'wsgi.multiprocess', 'wsgi.run_once', 'REQUEST_METHOD', 'SCRIPT_NAME', 'RAW_PATH_INFO', 'PATH_INFO', 'QUERY_STRING', 'CONTENT_TYPE', 'CONTENT_LENGTH', 'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL']: environ[environ_var] = None start_response('200 OK', [('Content-type', 'text/plain')]) return [] self.site.application = clobberin_time sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\n' b'Host: localhost\r\n' b'Connection: close\r\n' b'\r\n\r\n') assert b'200 OK' in recvall(sock) def test_022_custom_pool(self): # just test that it 
accepts the parameter for now # TODO(waitall): test that it uses the pool and that you can waitall() to # ensure that all clients finished p = eventlet.GreenPool(5) self.spawn_server(custom_pool=p) # this stuff is copied from test_001_server, could be better factored sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n') result = recvall(sock) assert result.startswith(b'HTTP'), result assert result.endswith(b'hello world'), result def test_023_bad_content_length(self): sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nContent-length: argh\r\n\r\n') result = recvall(sock) assert result.startswith(b'HTTP'), result assert b'400 Bad Request' in result, result assert b'500' not in result, result sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nContent-length:\r\n\r\n') result = recvall(sock) assert result.startswith(b'HTTP'), result assert b'400 Bad Request' in result, result assert b'500' not in result, result sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nContent-length: \r\n\r\n') result = recvall(sock) assert result.startswith(b'HTTP'), result assert b'400 Bad Request' in result, result assert b'500' not in result, result sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nContent-length: -10\r\n\r\n') result = recvall(sock) assert result.startswith(b'HTTP'), result assert b'400 Bad Request' in result, result assert b'500' not in result, result def test_024_expect_100_continue(self): def wsgi_app(environ, start_response): if int(environ['CONTENT_LENGTH']) > 1024: start_response('417 Expectation Failed', [('Content-Length', '7')]) return [b'failure'] else: text = environ['wsgi.input'].read() start_response('200 OK', [('Content-Length', str(len(text)))]) return [text] self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write(b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 1025\r\n' b'Expect: 100-continue\r\n\r\n') fd.flush() result = read_http(sock) self.assertEqual(result.status, 'HTTP/1.1 417 Expectation Failed') self.assertEqual(result.body, b'failure') for expect_value in ('100-continue', '100-Continue'): fd.write( 'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\n' 'Expect: {}\r\n\r\ntesting'.format(expect_value).encode()) fd.flush() header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line) assert header_lines[0].startswith(b'HTTP/1.1 200 OK') assert fd.read(7) == b'testing' fd.close() sock.close() def test_024a_expect_100_continue_with_headers(self): def wsgi_app(environ, start_response): if int(environ['CONTENT_LENGTH']) > 1024: start_response('417 Expectation Failed', [('Content-Length', '7')]) return [b'failure'] else: environ['wsgi.input'].set_hundred_continue_response_headers( [('Hundred-Continue-Header-1', 'H1'), ('Hundred-Continue-Header-2', 'H2'), ('Hundred-Continue-Header-k', 'Hk')]) text = environ['wsgi.input'].read() start_response('200 OK', [('Content-Length', str(len(text)))]) return [text] self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write(b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 1025\r\n' b'Expect: 
100-continue\r\n\r\n') fd.flush() result = read_http(sock) self.assertEqual(result.status, 'HTTP/1.1 417 Expectation Failed') self.assertEqual(result.body, b'failure') fd.write( b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\n' b'Expect: 100-continue\r\n\r\ntesting') fd.flush() header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') headers = dict((k, v) for k, v in (h.split(b': ', 1) for h in header_lines[1:])) assert b'Hundred-Continue-Header-1' in headers assert b'Hundred-Continue-Header-2' in headers assert b'Hundred-Continue-Header-K' in headers self.assertEqual(b'H1', headers[b'Hundred-Continue-Header-1']) self.assertEqual(b'H2', headers[b'Hundred-Continue-Header-2']) self.assertEqual(b'Hk', headers[b'Hundred-Continue-Header-K']) header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line) assert header_lines[0].startswith(b'HTTP/1.1 200 OK') self.assertEqual(fd.read(7), b'testing') fd.close() sock.close() def test_024b_expect_100_continue_with_headers_multiple_chunked(self): def wsgi_app(environ, start_response): environ['wsgi.input'].set_hundred_continue_response_headers( [('Hundred-Continue-Header-1', 'H1'), ('Hundred-Continue-Header-2', 'H2')]) text = environ['wsgi.input'].read() environ['wsgi.input'].set_hundred_continue_response_headers( [('Hundred-Continue-Header-3', 'H3')]) environ['wsgi.input'].send_hundred_continue_response() text += environ['wsgi.input'].read() start_response('200 OK', [('Content-Length', str(len(text)))]) return [text] self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write(b'PUT /a HTTP/1.1\r\n' b'Host: localhost\r\nConnection: close\r\n' b'Transfer-Encoding: chunked\r\n' b'Expect: 100-continue\r\n\r\n') fd.flush() # Expect 1st 100-continue response header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') headers = dict((k, v) for k, v in (h.split(b': ', 1) for h in header_lines[1:])) assert b'Hundred-Continue-Header-1' in headers assert b'Hundred-Continue-Header-2' in headers self.assertEqual(b'H1', headers[b'Hundred-Continue-Header-1']) self.assertEqual(b'H2', headers[b'Hundred-Continue-Header-2']) # Send message 1 fd.write(b'5\r\nfirst\r\n8\r\n message\r\n0\r\n\r\n') fd.flush() # Expect a 2nd 100-continue response header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') headers = dict((k, v) for k, v in (h.split(b': ', 1) for h in header_lines[1:])) assert b'Hundred-Continue-Header-3' in headers self.assertEqual(b'H3', headers[b'Hundred-Continue-Header-3']) # Send message 2 fd.write(b'8\r\n, second\r\n8\r\n message\r\n0\r\n\r\n') fd.flush() # Expect final 200-OK header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 200 OK') self.assertEqual(fd.read(29), b'first message, second message') fd.close() sock.close() def test_024c_expect_100_continue_with_headers_multiple_nonchunked(self): def wsgi_app(environ, start_response): environ['wsgi.input'].set_hundred_continue_response_headers( [('Hundred-Continue-Header-1', 'H1'), ('Hundred-Continue-Header-2', 'H2')]) text = 
environ['wsgi.input'].read(13) environ['wsgi.input'].set_hundred_continue_response_headers( [('Hundred-Continue-Header-3', 'H3')]) environ['wsgi.input'].send_hundred_continue_response() text += environ['wsgi.input'].read(16) start_response('200 OK', [('Content-Length', str(len(text)))]) return [text] self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write(b'PUT /a HTTP/1.1\r\n' b'Host: localhost\r\nConnection: close\r\n' b'Content-Length: 29\r\n' b'Expect: 100-continue\r\n\r\n') fd.flush() # Expect 1st 100-continue response header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') headers = dict((k, v) for k, v in (h.split(b': ', 1) for h in header_lines[1:])) assert b'Hundred-Continue-Header-1' in headers assert b'Hundred-Continue-Header-2' in headers self.assertEqual(b'H1', headers[b'Hundred-Continue-Header-1']) self.assertEqual(b'H2', headers[b'Hundred-Continue-Header-2']) # Send message 1 fd.write(b'first message') fd.flush() # Expect a 2nd 100-continue response header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') headers = dict((k, v) for k, v in (h.split(b': ', 1) for h in header_lines[1:])) assert b'Hundred-Continue-Header-3' in headers self.assertEqual(b'H3', headers[b'Hundred-Continue-Header-3']) # Send message 2 fd.write(b', second message\r\n') fd.flush() # Expect final 200-OK header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 200 OK') self.assertEqual(fd.read(29), b'first message, second message') fd.close() sock.close() def test_024d_expect_100_continue_with_eager_app_chunked(self): def wsgi_app(environ, start_response): # app knows it's going to do some time-intensive thing and doesn't # want clients to time out, so it's protocol says to: # * generally expect a successful status code, # * be prepared to eat some whitespace that will get dribbled out # periodically, and # * parse the final status from the response body. 
environ['eventlet.minimum_write_chunk_size'] = 0 start_response('202 Accepted', []) def resp_gen(): yield b' ' environ['wsgi.input'].read() yield b' ' yield b'\n503 Service Unavailable\n\nOops!\n' return resp_gen() self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') fd.write(b'PUT /a HTTP/1.1\r\n' b'Host: localhost\r\nConnection: close\r\n' b'Transfer-Encoding: chunked\r\n' b'Expect: 100-continue\r\n\r\n') fd.flush() # Expect the optimistic response header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line.strip()) self.assertEqual(header_lines[0], b'HTTP/1.1 202 Accepted') def chunkify(data): return '{:x}'.format(len(data)).encode('ascii') + b'\r\n' + data + b'\r\n' def expect_chunk(data): expected = chunkify(data) self.assertEqual(expected, fd.read(len(expected))) # Can even see that initial whitespace expect_chunk(b' ') # Send message fd.write(chunkify(b'some data')) fd.write(chunkify(b'')) # end-of-message fd.flush() # Expect final response expect_chunk(b' ') expect_chunk(b'\n503 Service Unavailable\n\nOops!\n') expect_chunk(b'') # end-of-message fd.close() sock.close() def test_025_accept_errors(self): debug.hub_exceptions(True) listener = greensocket.socket() listener.bind(('localhost', 0)) # NOT calling listen, to trigger the error with tests.capture_stderr() as log: self.spawn_server(sock=listener) eventlet.sleep(0) # need to enter server loop try: eventlet.connect(self.server_addr) self.fail("Didn't expect to connect") except socket.error as exc: self.assertEqual(support.get_errno(exc), errno.ECONNREFUSED) log_content = log.getvalue() assert 'Invalid argument' in log_content, log_content debug.hub_exceptions(False) def test_026_log_format(self): self.spawn_server(log_format="HI %(request_line)s HI") sock = eventlet.connect(self.server_addr) sock.sendall(b'GET /yo! HTTP/1.1\r\nHost: localhost\r\n\r\n') sock.recv(1024) sock.close() assert '\nHI GET /yo! 
HTTP/1.1 HI\n' in self.logfile.getvalue(), self.logfile.getvalue() def test_close_chunked_with_1_0_client(self): # verify that if we return a generator from our app # and we're not speaking with a 1.1 client, that we # close the connection self.site.application = chunked_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n') result = read_http(sock) self.assertEqual(result.headers_lower['connection'], 'close') self.assertNotEqual(result.headers_lower.get('transfer-encoding'), 'chunked') self.assertEqual(result.body, b"thisischunked") def test_chunked_response_when_app_yields_empty_string(self): def empty_string_chunked_app(env, start_response): env['eventlet.minimum_write_chunk_size'] = 0 # no buffering start_response('200 OK', [('Content-type', 'text/plain')]) return iter([b"stuff", b"", b"more stuff"]) self.site.application = empty_string_chunked_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result = read_http(sock) self.assertEqual(result.headers_lower.get('transfer-encoding'), 'chunked') self.assertEqual(result.body, b"5\r\nstuff\r\na\r\nmore stuff\r\n0\r\n\r\n") def test_minimum_chunk_size_parameter_leaves_httpprotocol_class_member_intact(self): start_size = wsgi.HttpProtocol.minimum_chunk_size self.spawn_server(minimum_chunk_size=start_size * 2) sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') read_http(sock) self.assertEqual(wsgi.HttpProtocol.minimum_chunk_size, start_size) sock.close() def test_error_in_chunked_closes_connection(self): # From http://rhodesmill.org/brandon/2013/chunked-wsgi/ self.spawn_server(minimum_chunk_size=1) self.site.application = chunked_fail_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result = read_http(sock) self.assertEqual(result.status, 'HTTP/1.1 200 OK') self.assertEqual(result.headers_lower.get('transfer-encoding'), 'chunked') expected_body = ( b'27\r\nThe dwarves of yore made mighty spells,\r\n' b'25\r\nWhile hammers fell like ringing bells\r\n') self.assertEqual(result.body, expected_body) # verify that socket is closed by server self.assertEqual(sock.recv(1), b'') def test_026_http_10_nokeepalive(self): # verify that if an http/1.0 client sends connection: keep-alive # and the server doesn't accept keep-alives, we close the connection self.spawn_server(keepalive=False) sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n') result = read_http(sock) self.assertEqual(result.headers_lower['connection'], 'close') def test_027_keepalive_chunked(self): self.site.application = chunked_post sock = eventlet.connect(self.server_addr) common_suffix = ( b'Host: localhost\r\nTransfer-Encoding: chunked\r\n\r\n' + b'10\r\n0123456789abcdef\r\n0\r\n\r\n') sock.sendall(b'PUT /a HTTP/1.1\r\n' + common_suffix) read_http(sock) sock.sendall(b'PUT /b HTTP/1.1\r\n' + common_suffix) read_http(sock) sock.sendall(b'PUT /c HTTP/1.1\r\n' + common_suffix) read_http(sock) sock.sendall(b'PUT /a HTTP/1.1\r\n' + common_suffix) read_http(sock) sock.close() @tests.skip_if_no_ssl def test_028_ssl_handshake_errors(self): errored = [False] def server(sock): try: wsgi.server(sock=sock, site=hello_world, log=self.logfile) errored[0] = 'SSL handshake error caused wsgi.server to exit.' 
except greenthread.greenlet.GreenletExit: pass except Exception as e: errored[0] = 'SSL handshake error raised exception %s.' % e raise for data in ('', 'GET /non-ssl-request HTTP/1.0\r\n\r\n'): srv_sock = eventlet.wrap_ssl( eventlet.listen(('localhost', 0)), certfile=certificate_file, keyfile=private_key_file, server_side=True) addr = srv_sock.getsockname() g = eventlet.spawn_n(server, srv_sock) client = eventlet.connect(addr) if data: # send non-ssl request client.sendall(data.encode()) else: # close sock prematurely client.close() eventlet.sleep(0) # let context switch back to server assert not errored[0], errored[0] # make another request to ensure the server's still alive try: client = ssl.wrap_socket(eventlet.connect(addr)) client.write(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n') result = recvall(client) assert result.startswith(b'HTTP'), result assert result.endswith(b'hello world') except ImportError: pass # TODO(openssl): should test with OpenSSL greenthread.kill(g) def test_029_posthooks(self): posthook1_count = [0] posthook2_count = [0] def posthook1(env, value, multiplier=1): self.assertEqual(env['local.test'], 'test_029_posthooks') posthook1_count[0] += value * multiplier def posthook2(env, value, divisor=1): self.assertEqual(env['local.test'], 'test_029_posthooks') posthook2_count[0] += value / divisor def one_posthook_app(env, start_response): env['local.test'] = 'test_029_posthooks' if 'eventlet.posthooks' not in env: start_response('500 eventlet.posthooks not supported', [('Content-Type', 'text/plain')]) else: env['eventlet.posthooks'].append( (posthook1, (2,), {'multiplier': 3})) start_response('200 OK', [('Content-Type', 'text/plain')]) yield b'' self.site.application = one_posthook_app sock = eventlet.connect(self.server_addr) fp = sock.makefile('rwb') fp.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') fp.flush() self.assertEqual(fp.readline(), b'HTTP/1.1 200 OK\r\n') fp.close() sock.close() self.assertEqual(posthook1_count[0], 6) self.assertEqual(posthook2_count[0], 0) def two_posthook_app(env, start_response): env['local.test'] = 'test_029_posthooks' if 'eventlet.posthooks' not in env: start_response('500 eventlet.posthooks not supported', [('Content-Type', 'text/plain')]) else: env['eventlet.posthooks'].append( (posthook1, (4,), {'multiplier': 5})) env['eventlet.posthooks'].append( (posthook2, (100,), {'divisor': 4})) start_response('200 OK', [('Content-Type', 'text/plain')]) yield b'' self.site.application = two_posthook_app sock = eventlet.connect(self.server_addr) fp = sock.makefile('rwb') fp.write(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') fp.flush() self.assertEqual(fp.readline(), b'HTTP/1.1 200 OK\r\n') fp.close() sock.close() self.assertEqual(posthook1_count[0], 26) self.assertEqual(posthook2_count[0], 25) def test_030_reject_long_header_lines(self): sock = eventlet.connect(self.server_addr) request = 'GET / HTTP/1.0\r\nHost: localhost\r\nLong: %s\r\n\r\n' % \ ('a' * 10000) send_expect_close(sock, request.encode()) result = read_http(sock) self.assertEqual(result.status, 'HTTP/1.0 400 Header Line Too Long') def test_031_reject_large_headers(self): sock = eventlet.connect(self.server_addr) headers = ('Name: %s\r\n' % ('a' * 7000,)) * 20 request = 'GET / HTTP/1.0\r\nHost: localhost\r\n%s\r\n\r\n' % headers send_expect_close(sock, request.encode()) result = read_http(sock) self.assertEqual(result.status, 'HTTP/1.0 400 Headers Too Large') def test_032_wsgi_input_as_iterable(self): # https://bitbucket.org/eventlet/eventlet/issue/150 # env['wsgi.input'] 
returns a single byte at a time # when used as an iterator g = [0] def echo_by_iterating(env, start_response): start_response('200 OK', [('Content-type', 'text/plain')]) for chunk in env['wsgi.input']: g[0] += 1 yield chunk self.site.application = echo_by_iterating upload_data = b'123456789abcdef' * 100 request = ( 'POST / HTTP/1.0\r\n' 'Host: localhost\r\n' 'Content-Length: %i\r\n\r\n%s' ) % (len(upload_data), bytes_to_str(upload_data)) sock = eventlet.connect(self.server_addr) sock.sendall(request.encode()) result = read_http(sock) self.assertEqual(result.body, upload_data) self.assertEqual(g[0], 1) def test_zero_length_chunked_response(self): def zero_chunked_app(env, start_response): start_response('200 OK', [('Content-type', 'text/plain')]) yield b"" self.site.application = zero_chunked_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') response = recvall(sock).split(b'\r\n') headers = [] while True: h = response.pop(0) headers.append(h) if h == b'': break assert b'Transfer-Encoding: chunked' in b''.join(headers), headers # should only be one chunk of zero size with two blank lines # (one terminates the chunk, one terminates the body) self.assertEqual(response, [b'0', b'', b'']) def test_configurable_url_length_limit(self): self.spawn_server(url_length_limit=20000) sock = eventlet.connect(self.server_addr) path = 'x' * 15000 request = 'GET /%s HTTP/1.0\r\nHost: localhost\r\n\r\n' % path fd = sock.makefile('rwb') fd.write(request.encode()) fd.flush() result = fd.readline() if result: # windows closes the socket before the data is flushed, # so we never get anything back status = result.split(b' ')[1] self.assertEqual(status, b'200') fd.close() def test_aborted_chunked_post(self): read_content = event.Event() blew_up = [False] def chunk_reader(env, start_response): try: content = env['wsgi.input'].read(1024) except IOError: blew_up[0] = True content = b'ok' read_content.send(content) start_response('200 OK', [('Content-Type', 'text/plain')]) return [content] self.site.application = chunk_reader expected_body = 'a bunch of stuff' data = "\r\n".join(['PUT /somefile HTTP/1.0', 'Transfer-Encoding: chunked', '', 'def', expected_body]) # start PUT-ing some chunked data but close prematurely sock = eventlet.connect(self.server_addr) sock.sendall(data.encode()) sock.close() # the test passes if we successfully get here, and read all the data # in spite of the early close self.assertEqual(read_content.wait(), b'ok') assert blew_up[0] def test_aborted_chunked_post_between_chunks(self): read_content = event.Event() blew_up = [False] def chunk_reader(env, start_response): try: content = env['wsgi.input'].read(1024) except wsgi.ChunkReadError: blew_up[0] = True content = b'ok' except Exception as err: blew_up[0] = True content = b'wrong exception: ' + str(err).encode() read_content.send(content) start_response('200 OK', [('Content-Type', 'text/plain')]) return [content] self.site.application = chunk_reader expected_body = 'A' * 0xdb data = "\r\n".join(['PUT /somefile HTTP/1.0', 'Transfer-Encoding: chunked', '', 'db', expected_body]) # start PUT-ing some chunked data but close prematurely sock = eventlet.connect(self.server_addr) sock.sendall(data.encode()) sock.close() # the test passes if we successfully get here, and read all the data # in spite of the early close self.assertEqual(read_content.wait(), b'ok') assert blew_up[0] def test_aborted_chunked_post_bad_chunks(self): read_content = event.Event() blew_up = [False] def 
chunk_reader(env, start_response): try: content = env['wsgi.input'].read(1024) except wsgi.ChunkReadError: blew_up[0] = True content = b'ok' except Exception as err: blew_up[0] = True content = b'wrong exception: ' + str(err).encode() read_content.send(content) start_response('200 OK', [('Content-Type', 'text/plain')]) return [content] self.site.application = chunk_reader expected_body = 'look here is some data for you' data = "\r\n".join(['PUT /somefile HTTP/1.0', 'Transfer-Encoding: chunked', '', 'cats', expected_body]) # start PUT-ing some garbage sock = eventlet.connect(self.server_addr) sock.sendall(data.encode()) sock.close() # the test passes if we successfully get here, and read all the data # in spite of the early close self.assertEqual(read_content.wait(), b'ok') assert blew_up[0] def test_aborted_post_io_error(self): ran_post_req_hook = [False] def post_req_hook(env): ran_post_req_hook[0] = True def early_responder(env, start_response): env['eventlet.posthooks'] = [(post_req_hook, (), {})] start_response('200 OK', [('Content-Type', 'text/plain')]) return ['ok'] self.site.application = early_responder data = "\r\n".join(['PUT /somefile HTTP/1.1', 'Transfer-Encoding: chunked', '', '20', 'not 32 bytes']) sock = eventlet.connect(self.server_addr) sock.sendall(data.encode()) sock.close() # Give the server a chance to wrap things up eventlet.sleep(0.01) # Unexpected EOF shouldn't kill the server; # post-request processing should still happen assert ran_post_req_hook[0] def test_exceptions_close_connection(self): def wsgi_app(environ, start_response): raise RuntimeError("intentional error") self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result = read_http(sock) self.assertEqual(result.status, 'HTTP/1.1 500 Internal Server Error') self.assertEqual(result.headers_lower['connection'], 'close') assert 'transfer-encoding' not in result.headers_lower def test_unicode_with_only_ascii_characters_works(self): def wsgi_app(environ, start_response): start_response("200 OK", []) yield b"oh hai, " yield u"xxx" self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result = read_http(sock) assert b'xxx' in result.body def test_unicode_with_nonascii_characters_raises_error(self): def wsgi_app(environ, start_response): start_response("200 OK", []) yield b"oh hai, " yield u"xxx \u0230" self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result = read_http(sock) self.assertEqual(result.status, 'HTTP/1.1 500 Internal Server Error') self.assertEqual(result.headers_lower['connection'], 'close') def test_path_info_decoding(self): def wsgi_app(environ, start_response): start_response("200 OK", []) yield six.b("decoded: %s" % environ['PATH_INFO']) yield six.b("raw: %s" % environ['RAW_PATH_INFO']) self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET /a*b@%40%233 HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result = read_http(sock) assert result.status == 'HTTP/1.1 200 OK' assert b'decoded: /a*b@@#3' in result.body assert b'raw: /a*b@%40%233' in result.body def test_path_info_latin1(self): # https://github.com/eventlet/eventlet/issues/468 g = [] def wsgi_app(environ, start_response): g.append(environ['PATH_INFO']) start_response("200 OK", []) return b'' 
self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) # This is a properly-quoted request for the UTF-8 path /你好 sock.sendall(b'GET /%E4%BD%A0%E5%A5%BD HTTP/1.1\r\nHost: localhost\r\n\r\n') result = read_http(sock) assert result.status == 'HTTP/1.1 200 OK' # Like above, but the octets are reversed before being quoted, # so the result should *not* be interpreted as UTF-8 sock.sendall(b'GET /%BD%A5%E5%A0%BD%E4 HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result = read_http(sock) assert result.status == 'HTTP/1.1 200 OK' # that was only preparation, actual tests below # Per PEP-0333 https://www.python.org/dev/peps/pep-0333/#unicode-issues # in all WSGI environment strings application must observe either bytes in latin-1 (ISO-8859-1) # or unicode code points \u0000..\u00ff msg = 'Expected PATH_INFO to be a native string, not {0}'.format(type(g[0])) assert isinstance(g[0], str), msg # Fortunately, WSGI strings have the same literal representation on both py2 and py3 assert g[0] == '/\xe4\xbd\xa0\xe5\xa5\xbd' msg = 'Expected PATH_INFO to be a native string, not {0}'.format(type(g[1])) assert isinstance(g[1], str), msg assert g[1] == '/\xbd\xa5\xe5\xa0\xbd\xe4' @tests.skip_if_no_ipv6 def test_ipv6(self): try: sock = eventlet.listen(('::1', 0), family=socket.AF_INET6) except (socket.gaierror, socket.error): # probably no ipv6 return log = six.StringIO() # first thing the server does is try to log the IP it's bound to def run_server(): try: wsgi.server(sock=sock, log=log, site=Site()) except ValueError: log.write(b'broken') self.spawn_thread(run_server) logval = log.getvalue() while not logval: eventlet.sleep(0.0) logval = log.getvalue() if 'broked' in logval: self.fail('WSGI server raised exception with ipv6 socket') def test_debug(self): self.spawn_server(debug=False) def crasher(env, start_response): raise RuntimeError("intentional crash") self.site.application = crasher sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result1 = read_http(sock) self.assertEqual(result1.status, 'HTTP/1.1 500 Internal Server Error') self.assertEqual(result1.body, b'') self.assertEqual(result1.headers_lower['connection'], 'close') assert 'transfer-encoding' not in result1.headers_lower # verify traceback when debugging enabled self.spawn_server(debug=True) self.site.application = crasher sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result2 = read_http(sock) self.assertEqual(result2.status, 'HTTP/1.1 500 Internal Server Error') assert b'intentional crash' in result2.body, result2.body assert b'RuntimeError' in result2.body, result2.body assert b'Traceback' in result2.body, result2.body self.assertEqual(result2.headers_lower['connection'], 'close') assert 'transfer-encoding' not in result2.headers_lower def test_client_disconnect(self): """Issue #95 Server must handle disconnect from client in the middle of response """ def long_response(environ, start_response): start_response('200 OK', [('Content-Length', '9876')]) yield b'a' * 9876 server_sock = eventlet.listen(('localhost', 0)) self.server_addr = server_sock.getsockname() server = wsgi.Server(server_sock, server_sock.getsockname(), long_response, log=self.logfile) def make_request(): sock = eventlet.connect(server_sock.getsockname()) sock.send(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') sock.close() request_thread = eventlet.spawn(make_request) client_sock, addr = server_sock.accept() # Next line must not raise IOError 
-32 Broken pipe server.process_request([addr, client_sock, wsgi.STATE_IDLE]) request_thread.wait() server_sock.close() def test_server_connection_timeout_exception(self): self.reset_timeout(5) # Handle connection socket timeouts # https://bitbucket.org/eventlet/eventlet/issue/143/ # Runs tests.wsgi_test_conntimeout in a separate process. tests.run_isolated('wsgi_connection_timeout.py') def test_server_socket_timeout(self): self.spawn_server(socket_timeout=0.1) sock = eventlet.connect(self.server_addr) sock.send(b'GET / HTTP/1.1\r\n') eventlet.sleep(0.1) try: read_http(sock) assert False, 'Expected ConnectionClosed exception' except ConnectionClosed: pass def test_header_name_capitalization(self): def wsgi_app(environ, start_response): start_response('200 oK', [ ('sOMe-WEirD', 'cAsE'), ('wiTH-\xdf-LATIN1-\xff', 'chars'), ]) return [b''] self.spawn_server(site=wsgi_app) sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result = read_http(sock) sock.close() self.assertEqual(result.status, 'HTTP/1.1 200 oK') self.assertIn('Some-Weird', result.headers_original) self.assertEqual(result.headers_original['Some-Weird'], 'cAsE') self.assertIn('With-\xdf-Latin1-\xff', result.headers_original) self.assertEqual(result.headers_original['With-\xdf-Latin1-\xff'], 'chars') def test_disable_header_name_capitalization(self): # Disable HTTP header name capitalization # # https://github.com/eventlet/eventlet/issues/80 random_case_header = ('eTAg', 'TAg-VAluE') def wsgi_app(environ, start_response): start_response('200 oK', [random_case_header]) return [b''] self.spawn_server(site=wsgi_app, capitalize_response_headers=False) sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result = read_http(sock) sock.close() self.assertEqual(result.status, 'HTTP/1.1 200 oK') self.assertEqual(result.headers_lower[random_case_header[0].lower()], random_case_header[1]) self.assertEqual(result.headers_original[random_case_header[0]], random_case_header[1]) def test_log_unix_address(self): def app(environ, start_response): start_response('200 OK', []) return ['\n{0}={1}\n'.format(k, v).encode() for k, v in environ.items()] tempdir = tempfile.mkdtemp('eventlet_test_log_unix_address') try: server_sock = eventlet.listen(tempdir + '/socket', socket.AF_UNIX) path = server_sock.getsockname() log = six.StringIO() self.spawn_server(site=app, sock=server_sock, log=log) eventlet.sleep(0) # need to enter server loop assert 'http:' + path in log.getvalue() client_sock = eventlet.connect(path, family=socket.AF_UNIX) client_sock.sendall(b'GET / HTTP/1.0\r\nHost: localhost\r\n\r\n') result = read_http(client_sock) client_sock.close() assert '\nunix -' in log.getvalue() finally: shutil.rmtree(tempdir) assert result.status == 'HTTP/1.1 200 OK', repr(result) + log.getvalue() assert b'\nSERVER_NAME=unix\n' in result.body assert b'\nSERVER_PORT=\n' in result.body assert b'\nREMOTE_ADDR=unix\n' in result.body assert b'\nREMOTE_PORT=\n' in result.body def test_headers_raw(self): def app(environ, start_response): start_response('200 OK', []) return [b'\n'.join('{0}: {1}'.format(*kv).encode() for kv in environ['headers_raw'])] self.spawn_server(site=app) sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nx-ANY_k: one\r\nx-ANY_k: two\r\n\r\n') result = read_http(sock) sock.close() assert result.status == 'HTTP/1.1 200 OK' assert result.body == b'Host: localhost\nx-ANY_k: one\nx-ANY_k: two' def 
test_env_headers(self): def app(environ, start_response): start_response('200 OK', []) return ['{0}: {1}\n'.format(*kv).encode() for kv in sorted(environ.items()) if kv[0].startswith('HTTP_')] self.spawn_server(site=app) sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\npath-info: foo\r\n' b'x-ANY_k: one\r\nhttp-x-ANY_k: two\r\n\r\n') result = read_http(sock) sock.close() assert result.status == 'HTTP/1.1 200 OK', 'Received status {0!r}'.format(result.status) assert result.body == (b'HTTP_HOST: localhost\nHTTP_HTTP_X_ANY_K: two\n' b'HTTP_PATH_INFO: foo\nHTTP_X_ANY_K: one\n') def test_env_header_stripping(self): def app(environ, start_response): start_response('200 OK', []) # On py3, headers get parsed as Latin-1, so send them back out as Latin-1, too return [line if isinstance(line, bytes) else line.encode('latin1') for kv in sorted(environ.items()) if kv[0].startswith('HTTP_') for line in ('{0}: {1}\n'.format(*kv),)] self.spawn_server(site=app) sock = eventlet.connect(self.server_addr) sock.sendall( b'GET / HTTP/1.1\r\n' b'Host: localhost\r\n' b'spaced: o u t \r\n' b'trailing: tab\t\r\n' b'trailing-nbsp: \xc2\xa0\r\n' b'null-set: \xe2\x88\x85\r\n\r\n') result = read_http(sock) sock.close() assert result.status == 'HTTP/1.1 200 OK', 'Received status {0!r}'.format(result.status) assert result.body == ( b'HTTP_HOST: localhost\n' b'HTTP_NULL_SET: \xe2\x88\x85\n' b'HTTP_SPACED: o u t\n' b'HTTP_TRAILING: tab\n' b'HTTP_TRAILING_NBSP: \xc2\xa0\n') def test_log_disable(self): self.spawn_server(log_output=False) sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\npath-info: foo\r\n' b'x-ANY_k: one\r\nhttp-x-ANY_k: two\r\n\r\n') read_http(sock) sock.close() log_content = self.logfile.getvalue() assert log_content == '' def test_close_idle_connections(self): self.reset_timeout(2) pool = eventlet.GreenPool() self.spawn_server(custom_pool=pool) # https://github.com/eventlet/eventlet/issues/188 sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result = read_http(sock) assert result.status == 'HTTP/1.1 200 OK', 'Received status {0!r}'.format(result.status) self.killer.kill(KeyboardInterrupt) try: with eventlet.Timeout(1): pool.waitall() except Exception: assert False, self.logfile.getvalue() def read_headers(sock): fd = sock.makefile('rb') try: response_line = fd.readline() except socket.error as exc: if support.get_errno(exc) == 10053: raise ConnectionClosed raise if not response_line: raise ConnectionClosed header_lines = [] while True: line = fd.readline() if line == b'\r\n': break else: header_lines.append(line) headers = dict() for x in header_lines: x = x.strip() if not x: continue key, value = x.split(b': ', 1) assert key.lower() not in headers, "%s header duplicated" % key headers[bytes_to_str(key.lower())] = bytes_to_str(value) return bytes_to_str(response_line), headers class IterableAlreadyHandledTest(_TestBase): def set_site(self): self.site = IterableSite() def get_app(self): return IterableApp(True) def test_iterable_app_keeps_socket_open_unless_connection_close_sent(self): self.site.application = self.get_app() sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') response_line, headers = read_headers(sock) self.assertEqual(response_line, 'HTTP/1.1 200 OK\r\n') assert 'connection' not in headers sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') result = read_http(sock) 
self.assertEqual(result.status, 'HTTP/1.1 200 OK') self.assertEqual(result.headers_lower.get('transfer-encoding'), 'chunked') self.assertEqual(result.body, b'0\r\n\r\n') # Still coming back chunked class ProxiedIterableAlreadyHandledTest(IterableAlreadyHandledTest): # same thing as the previous test but ensuring that it works with tpooled # results as well as regular ones @tests.skip_with_pyevent def get_app(self): return tpool.Proxy(super(ProxiedIterableAlreadyHandledTest, self).get_app()) def tearDown(self): tpool.killall() super(ProxiedIterableAlreadyHandledTest, self).tearDown() class TestChunkedInput(_TestBase): validator = None def application(self, env, start_response): input = env['wsgi.input'] response = [] pi = env["PATH_INFO"] if pi == "/short-read": d = input.read(10) response = [d] elif pi == "/lines": for x in input: response.append(x) elif pi == "/readline": response.extend(iter(input.readline, b'')) response.append(('\nread %d lines' % len(response)).encode()) elif pi == "/readlines": response.extend(input.readlines()) response.append(('\nread %d lines' % len(response)).encode()) elif pi == "/ping": input.read() response.append(b"pong") elif pi.startswith("/yield_spaces"): if pi.endswith('override_min'): env['eventlet.minimum_write_chunk_size'] = 1 self.yield_next_space = False def response_iter(): yield b' ' num_sleeps = 0 while not self.yield_next_space and num_sleeps < 200: eventlet.sleep(.01) num_sleeps += 1 yield b' ' start_response('200 OK', [('Content-Type', 'text/plain'), ('Content-Length', '2')]) return response_iter() else: raise RuntimeError("bad path") start_response('200 OK', [('Content-Type', 'text/plain')]) return response def connect(self): return eventlet.connect(self.server_addr) def set_site(self): self.site = Site() self.site.application = self.application def chunk_encode(self, chunks, dirt=""): b = "" for c in chunks: b += "%x%s\r\n%s\r\n" % (len(c), dirt, c) return b def body(self, dirt=""): return self.chunk_encode(["this", " is ", "chunked", "\nline", " 2", "\n", "line3", ""], dirt=dirt) def ping(self, fd): fd.sendall(b"GET /ping HTTP/1.1\r\n\r\n") self.assertEqual(read_http(fd).body, b"pong") def test_short_read_with_content_length(self): body = self.body() req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n" \ "Content-Length:1000\r\n\r\n" + body fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b"this is ch") self.ping(fd) fd.close() def test_short_read_with_zero_content_length(self): body = self.body() req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n" \ "Content-Length:0\r\n\r\n" + body fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b"this is ch") self.ping(fd) fd.close() def test_short_read(self): body = self.body() req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b"this is ch") self.ping(fd) fd.close() def test_dirt(self): body = self.body(dirt="; here is dirt\0bla") req = "POST /ping HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b"pong") self.ping(fd) fd.close() def test_chunked_readline(self): body = self.body() req = "POST /lines HTTP/1.1\r\nContent-Length: %s\r\n" \ "transfer-encoding: Chunked\r\n\r\n%s" % (len(body), body) fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b'this is chunked\nline 2\nline3') 
fd.close() def test_chunked_readline_from_input(self): body = self.body() req = "POST /readline HTTP/1.1\r\nContent-Length: %s\r\n" \ "transfer-encoding: Chunked\r\n\r\n%s" % (len(body), body) fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b'this is chunked\nline 2\nline3\nread 3 lines') fd.close() def test_chunked_readlines_from_input(self): body = self.body() req = "POST /readlines HTTP/1.1\r\nContent-Length: %s\r\n" \ "transfer-encoding: Chunked\r\n\r\n%s" % (len(body), body) fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b'this is chunked\nline 2\nline3\nread 3 lines') fd.close() def test_chunked_readline_wsgi_override_minimum_chunk_size(self): fd = self.connect() fd.sendall(b"POST /yield_spaces/override_min HTTP/1.1\r\nContent-Length: 0\r\n\r\n") resp_so_far = b'' with eventlet.Timeout(.1): while True: one_byte = fd.recv(1) resp_so_far += one_byte if resp_so_far.endswith(b'\r\n\r\n'): break self.assertEqual(fd.recv(1), b' ') try: with eventlet.Timeout(.1): fd.recv(1) except eventlet.Timeout: pass else: assert False self.yield_next_space = True with eventlet.Timeout(.1): self.assertEqual(fd.recv(1), b' ') def test_chunked_readline_wsgi_not_override_minimum_chunk_size(self): fd = self.connect() fd.sendall(b"POST /yield_spaces HTTP/1.1\r\nContent-Length: 0\r\n\r\n") resp_so_far = b'' try: with eventlet.Timeout(.1): while True: one_byte = fd.recv(1) resp_so_far += one_byte if resp_so_far.endswith(b'\r\n\r\n'): break self.assertEqual(fd.recv(1), b' ') except eventlet.Timeout: pass else: assert False def test_close_before_finished(self): got_signal = [] def handler(*args): got_signal.append(1) raise KeyboardInterrupt() signal.signal(signal.SIGALRM, handler) signal.alarm(1) try: body = '4\r\nthi' req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body fd = self.connect() fd.sendall(req.encode()) fd.close() eventlet.sleep(0) # This is needed because on Python 3 GreenSocket.recv_into is called # rather than recv; recv_into right now (git 5ec3a3c) trampolines to # the hub *before* attempting to read anything from a file descriptor # therefore we need one extra context switch to let it notice closed # socket, die and leave the hub empty if six.PY3: eventlet.sleep(0) finally: signal.alarm(0) signal.signal(signal.SIGALRM, signal.SIG_DFL) assert not got_signal, "caught alarm signal. infinite loop detected." 
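
# --- Illustrative sketch (editor's addition, not part of the eventlet test suite) ---
# Many tests above hand-roll HTTP/1.1 chunked transfer encoding: each chunk is
# framed as "<hex length>\r\n<data>\r\n" and the body ends with a zero-length
# chunk "0\r\n\r\n".  The two helpers below are a minimal, hypothetical
# standalone example of producing and parsing that framing; the names
# encode_chunked/decode_chunked are assumptions for illustration only and are
# not eventlet or wsgi module APIs.  Trailers after the terminal chunk are
# ignored, which is sufficient for the bodies built in these tests.

def encode_chunked(chunks):
    # Frame an iterable of byte strings as a chunked message body.
    out = b''
    for chunk in chunks:
        if chunk:  # zero-length chunks would terminate the body early
            out += b'%x\r\n%s\r\n' % (len(chunk), chunk)
    return out + b'0\r\n\r\n'


def decode_chunked(data):
    # Parse a chunked message body back into the concatenated payload.
    body = b''
    while True:
        size_line, _, rest = data.partition(b'\r\n')
        size = int(size_line.split(b';')[0], 16)  # drop any chunk extensions
        if size == 0:
            return body  # terminal chunk; trailers (if any) are ignored
        body += rest[:size]
        data = rest[size + 2:]  # skip the chunk payload and its trailing CRLF


if __name__ == '__main__':
    # Round-trip usage example, mirroring the payloads used in TestChunkedInput.
    framed = encode_chunked([b'this', b' is ', b'chunked'])
    assert decode_chunked(framed) == b'this is chunked'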
eventlet-0.30.2/tests/zmq_test.py0000644000076500000240000004464714006212666017450 0ustar temotostaff00000000000000
import contextlib

try:
    from eventlet.green import zmq
except ImportError:
    zmq = {}  # for systems lacking zmq, skips tests instead of barfing
else:
    RECV_ON_CLOSED_SOCKET_ERRNOS = (zmq.ENOTSUP, zmq.ENOTSOCK)

import eventlet
import tests


def zmq_supported(_):
    try:
        import zmq
    except ImportError:
        return False
    return not tests.using_pyevent(_)


class TestUpstreamDownStream(tests.LimitedTestCase):
    TEST_TIMEOUT = 2

    @tests.skip_unless(zmq_supported)
    def setUp(self):
        super(TestUpstreamDownStream, self).setUp()
        self.context = zmq.Context()
        self.sockets = []

    @tests.skip_unless(zmq_supported)
    def tearDown(self):
        self.clear_up_sockets()
        super(TestUpstreamDownStream, self).tearDown()

    def create_bound_pair(self, type1, type2, interface='tcp://127.0.0.1'):
        """Create a bound socket pair using a random port."""
        s1 = self.context.socket(type1)
        port = s1.bind_to_random_port(interface)
        s2 = self.context.socket(type2)
        s2.connect('%s:%s' % (interface, port))
        self.sockets.append(s1)
        self.sockets.append(s2)
        return s1, s2, port

    def clear_up_sockets(self):
        for sock in self.sockets:
            sock.close()
        self.sockets = None
        self.context.destroy(0)

    def assertRaisesErrno(self, errnos, func, *args):
        try:
            func(*args)
        except zmq.ZMQError as e:
            if not hasattr(errnos, '__iter__'):
                errnos = (errnos,)
            if e.errno not in errnos:
                raise AssertionError(
                    "wrong error raised, expected one of ['%s'], got '%s'" % (
                        ", ".join("%s" % zmq.ZMQError(errno) for errno in errnos),
                        zmq.ZMQError(e.errno)
                    ),
                )
        else:
            self.fail("Function did not raise any error")

    @tests.skip_unless(zmq_supported)
    def test_close_linger(self):
        """Socket.close() must support linger argument.

        https://github.com/eventlet/eventlet/issues/9
        """
        sock1, sock2, _ = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        sock1.close(1)
        sock2.close(linger=0)

    @tests.skip_unless(zmq_supported)
    def test_recv_spawned_before_send_is_non_blocking(self):
        req, rep, port = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
        # req.connect(ipc)
        # rep.bind(ipc)
        eventlet.sleep()
        msg = dict(res=None)
        done = eventlet.Event()

        def rx():
            msg['res'] = rep.recv()
            done.send('done')

        eventlet.spawn(rx)
        req.send(b'test')
        done.wait()
        self.assertEqual(msg['res'], b'test')

    @tests.skip_unless(zmq_supported)
    def test_close_socket_raises_enotsup(self):
        req, rep, port = self.create_bound_pair(zmq.PAIR, zmq.PAIR)

        rep.close()
        req.close()
        self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, rep.recv)
        self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, req.send, b'test')

    @tests.skip_unless(zmq_supported)
    def test_close_xsocket_raises_enotsup(self):
        req, rep, port = self.create_bound_pair(zmq.XREQ, zmq.XREP)

        rep.close()
        req.close()
        self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, rep.recv)
        self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, req.send, b'test')

    @tests.skip_unless(zmq_supported)
    def test_send_1k_req_rep(self):
        req, rep, port = self.create_bound_pair(zmq.REQ, zmq.REP)
        eventlet.sleep()
        done = eventlet.Event()

        def tx():
            tx_i = 0
            req.send(str(tx_i).encode())
            while req.recv() != b'done':
                tx_i += 1
                req.send(str(tx_i).encode())
            done.send(0)

        def rx():
            while True:
                rx_i = rep.recv()
                if rx_i == b"1000":
                    rep.send(b'done')
                    break
                rep.send(b'i')

        eventlet.spawn(tx)
        eventlet.spawn(rx)
        final_i = done.wait()
        self.assertEqual(final_i, 0)

    @tests.skip_unless(zmq_supported)
    def test_send_1k_push_pull(self):
        down, up, port = self.create_bound_pair(zmq.PUSH, zmq.PULL)
        eventlet.sleep()
        done = eventlet.Event()

        def tx():
            tx_i = 0
            while tx_i <= 1000:
                tx_i += 1
                down.send(str(tx_i).encode())

        def rx():
            while True:
                rx_i = up.recv()
                if rx_i == b"1000":
                    done.send(0)
                    break

        eventlet.spawn(tx)
        eventlet.spawn(rx)
        final_i = done.wait()
        self.assertEqual(final_i, 0)

    @tests.skip_unless(zmq_supported)
    def test_send_1k_pub_sub(self):
        pub, sub_all, port = self.create_bound_pair(zmq.PUB, zmq.SUB)
        sub1 = self.context.socket(zmq.SUB)
        sub2 = self.context.socket(zmq.SUB)
        self.sockets.extend([sub1, sub2])
        addr = 'tcp://127.0.0.1:%s' % port
        sub1.connect(addr)
        sub2.connect(addr)
        sub_all.setsockopt(zmq.SUBSCRIBE, b'')
        sub1.setsockopt(zmq.SUBSCRIBE, b'sub1')
        sub2.setsockopt(zmq.SUBSCRIBE, b'sub2')

        sub_all_done = eventlet.Event()
        sub1_done = eventlet.Event()
        sub2_done = eventlet.Event()

        eventlet.sleep(0.2)

        def rx(sock, done_evt, msg_count=10000):
            count = 0
            while count < msg_count:
                msg = sock.recv()
                eventlet.sleep()
                if b'LAST' in msg:
                    break
                count += 1
            done_evt.send(count)

        def tx(sock):
            for i in range(1, 1001):
                msg = ("sub%s %s" % ([2, 1][i % 2], i)).encode()
                sock.send(msg)
                eventlet.sleep()
            sock.send(b'sub1 LAST')
            sock.send(b'sub2 LAST')

        eventlet.spawn(rx, sub_all, sub_all_done)
        eventlet.spawn(rx, sub1, sub1_done)
        eventlet.spawn(rx, sub2, sub2_done)
        eventlet.spawn(tx, pub)
        sub1_count = sub1_done.wait()
        sub2_count = sub2_done.wait()
        sub_all_count = sub_all_done.wait()
        self.assertEqual(sub1_count, 500)
        self.assertEqual(sub2_count, 500)
        self.assertEqual(sub_all_count, 1000)
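
    # PUB/SUB note: a freshly connected subscriber can miss the first messages
    # (ZeroMQ's "slow joiner" symptom), which is why test_send_1k_pub_sub above
    # sleeps briefly before publishing and test_change_subscription below
    # handshakes with BEGIN markers. A minimal sketch of that handshake, using
    # hypothetical names rather than anything defined in this module:
    #
    #     ready = eventlet.Event()
    #
    #     def subscriber(sub):
    #         while sub.recv() != b'BEGIN':   # drain markers until the stream starts
    #             eventlet.sleep(0)
    #         ready.send()
    #
    #     def publisher(pub):
    #         while not ready.ready():        # keep announcing until the SUB is listening
    #             pub.send(b'BEGIN')
    #             eventlet.sleep(0.005)
    #         pub.send(b'payload 1')          # real messages are no longer dropped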
    @tests.skip_unless(zmq_supported)
    def test_change_subscription(self):
        # FIXME: Extensive testing showed this particular test is the root cause
        # of sporadic failures on Travis.
        pub, sub, port = self.create_bound_pair(zmq.PUB, zmq.SUB)
        sub.setsockopt(zmq.SUBSCRIBE, b'test')
        eventlet.sleep(0)
        sub_ready = eventlet.Event()
        sub_last = eventlet.Event()
        sub_done = eventlet.Event()

        def rx():
            while sub.recv() != b'test BEGIN':
                eventlet.sleep(0)
            sub_ready.send()
            count = 0
            while True:
                msg = sub.recv()
                if msg == b'test BEGIN':
                    # BEGIN may come many times
                    continue
                if msg == b'test LAST':
                    sub.setsockopt(zmq.SUBSCRIBE, b'done')
                    sub.setsockopt(zmq.UNSUBSCRIBE, b'test')
                    eventlet.sleep(0)
                    # In a real application you should either sync
                    # or tolerate loss of messages.
                    sub_last.send()
                if msg == b'done DONE':
                    break
                count += 1
            sub_done.send(count)

        def tx():
            # Sync with the receiver being ready to avoid losing the first packets
            while not sub_ready.ready():
                pub.send(b'test BEGIN')
                eventlet.sleep(0.005)
            for i in range(1, 101):
                msg = 'test {0}'.format(i).encode()
                if i != 50:
                    pub.send(msg)
                else:
                    pub.send(b'test LAST')
                    sub_last.wait()
                    # XXX: putting a real delay of 1ms here fixes sporadic failures on Travis;
                    # just yielding with eventlet.sleep(0) doesn't cut it
                    eventlet.sleep(0.001)
            pub.send(b'done DONE')

        eventlet.spawn(rx)
        eventlet.spawn(tx)
        rx_count = sub_done.wait()
        self.assertEqual(rx_count, 50)

    @tests.skip_unless(zmq_supported)
    def test_recv_multipart_bug68(self):
        req, rep, port = self.create_bound_pair(zmq.REQ, zmq.REP)
        msg = [b'']
        req.send_multipart(msg)
        received_msg = rep.recv_multipart()
        self.assertEqual(received_msg, msg)

        # Send a message back the other way
        msg2 = [b""]
        rep.send_multipart(msg2, copy=False)
        # When receiving with copy=False, each frame comes back as a zmq
        # message object rather than bytes, so compare its .bytes payload
        # to the original data.
        received_msg = req.recv_multipart(copy=False)
        self.assertEqual([m.bytes for m in received_msg], msg2)

    @tests.skip_unless(zmq_supported)
    def test_recv_noblock_bug76(self):
        req, rep, port = self.create_bound_pair(zmq.REQ, zmq.REP)
        self.assertRaisesErrno(zmq.EAGAIN, rep.recv, zmq.NOBLOCK)
        self.assertRaisesErrno(zmq.EAGAIN, rep.recv, zmq.NOBLOCK, True)

    @tests.skip_unless(zmq_supported)
    def test_send_during_recv(self):
        sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
        eventlet.sleep()

        num_recvs = 30
        done_evts = [eventlet.Event() for _ in range(num_recvs)]

        def slow_rx(done, msg):
            self.assertEqual(sender.recv(), msg)
            done.send(0)

        def tx():
            tx_i = 0
            while tx_i <= 1000:
                sender.send(str(tx_i).encode())
                tx_i += 1

        def rx():
            while True:
                rx_i = receiver.recv()
                if rx_i == b"1000":
                    for i in range(num_recvs):
                        receiver.send(('done%d' % i).encode())
                    eventlet.sleep()
                    return

        for i in range(num_recvs):
            eventlet.spawn(slow_rx, done_evts[i], ("done%d" % i).encode())

        eventlet.spawn(tx)
        eventlet.spawn(rx)
        for evt in done_evts:
            self.assertEqual(evt.wait(), 0)

    @tests.skip_unless(zmq_supported)
    def test_send_during_recv_multipart(self):
        sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
        eventlet.sleep()

        num_recvs = 30
        done_evts = [eventlet.Event() for _ in range(num_recvs)]

        def slow_rx(done, msg):
            self.assertEqual(sender.recv_multipart(), msg)
            done.send(0)

        def tx():
            tx_i = 0
            while tx_i <= 1000:
                sender.send_multipart([str(tx_i).encode(), b'1', b'2', b'3'])
                tx_i += 1

        def rx():
            while True:
                rx_i = receiver.recv_multipart()
                if rx_i == [b"1000", b'1', b'2', b'3']:
                    for i in range(num_recvs):
                        receiver.send_multipart([
                            ('done%d' % i).encode(), b'a', b'b', b'c'])
                    eventlet.sleep()
                    return

        for i in range(num_recvs):
            eventlet.spawn(slow_rx, done_evts[i], [
                ("done%d" % i).encode(), b'a', b'b', b'c'])

        eventlet.spawn(tx)
        eventlet.spawn(rx)

        for i in range(num_recvs):
            final_i = done_evts[i].wait()
            self.assertEqual(final_i, 0)

    # Need some way to ensure a thread is blocked on send... this isn't working yet.
    @tests.skip_unless(zmq_supported)
    def test_recv_during_send(self):
        sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
        eventlet.sleep()

        done = eventlet.Event()

        try:
            SNDHWM = zmq.SNDHWM
        except AttributeError:
            # ZeroMQ < 3.0
            SNDHWM = zmq.HWM

        sender.setsockopt(SNDHWM, 10)
        sender.setsockopt(zmq.SNDBUF, 10)

        receiver.setsockopt(zmq.RCVBUF, 10)

        def tx():
            tx_i = 0
            while tx_i <= 1000:
                sender.send(str(tx_i).encode())
                tx_i += 1
            done.send(0)

        eventlet.spawn(tx)
        final_i = done.wait()
        self.assertEqual(final_i, 0)

    @tests.skip_unless(zmq_supported)
    def test_close_during_recv(self):
        sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
        eventlet.sleep()
        done1 = eventlet.Event()
        done2 = eventlet.Event()

        def rx(e):
            self.assertRaisesErrno(RECV_ON_CLOSED_SOCKET_ERRNOS, receiver.recv)
            e.send()

        eventlet.spawn(rx, done1)
        eventlet.spawn(rx, done2)

        eventlet.sleep()
        receiver.close()

        done1.wait()
        done2.wait()

    @tests.skip_unless(zmq_supported)
    def test_getsockopt_events(self):
        sock1, sock2, _port = self.create_bound_pair(zmq.DEALER, zmq.DEALER)
        eventlet.sleep()
        poll_out = zmq.Poller()
        poll_out.register(sock1, zmq.POLLOUT)
        sock_map = poll_out.poll(100)
        self.assertEqual(len(sock_map), 1)
        events = sock1.getsockopt(zmq.EVENTS)
        self.assertEqual(events & zmq.POLLOUT, zmq.POLLOUT)
        sock1.send(b'')

        poll_in = zmq.Poller()
        poll_in.register(sock2, zmq.POLLIN)
        sock_map = poll_in.poll(100)
        self.assertEqual(len(sock_map), 1)
        events = sock2.getsockopt(zmq.EVENTS)
        self.assertEqual(events & zmq.POLLIN, zmq.POLLIN)

    @tests.skip_unless(zmq_supported)
    def test_cpu_usage_after_bind(self):
        """zmq eats CPU after PUB socket .bind()

        https://bitbucket.org/eventlet/eventlet/issue/128

        According to the ZeroMQ documentation, the socket file descriptor
        can be readable without any pending messages. So we need to ensure
        that Eventlet's wrappers around ZeroMQ sockets do not create busy loops.

        A naive way to test this is to measure resource usage. This will
        require some tuning to set appropriate acceptable limits.
        """
        sock = self.context.socket(zmq.PUB)
        self.sockets.append(sock)
        sock.bind_to_random_port("tcp://127.0.0.1")
        eventlet.sleep()
        tests.check_idle_cpu_usage(0.2, 0.1)
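
    # The docstring above points at the underlying issue: the ZMQ_FD file
    # descriptor may look readable even when no whole message can be received,
    # so a green wrapper must consult getsockopt(zmq.EVENTS) or a non-blocking
    # recv instead of spinning on the fd. A rough sketch of that pattern, with
    # hypothetical helper names (not eventlet API):
    #
    #     def green_recv(sock, hub_wait):
    #         while True:
    #             try:
    #                 return sock.recv(zmq.NOBLOCK)   # returns only when a message is ready
    #             except zmq.ZMQError as e:
    #                 if e.errno != zmq.EAGAIN:
    #                     raise
    #                 hub_wait(sock)                  # park this greenthread until the fd fires again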
    @tests.skip_unless(zmq_supported)
    def test_cpu_usage_after_pub_send_or_dealer_recv(self):
        """zmq eats CPU after PUB send or DEALER recv.

        Same https://bitbucket.org/eventlet/eventlet/issue/128
        """
        pub, sub, _port = self.create_bound_pair(zmq.PUB, zmq.SUB)
        sub.setsockopt(zmq.SUBSCRIBE, b"")
        eventlet.sleep()
        pub.send(b'test_send')
        tests.check_idle_cpu_usage(0.2, 0.1)

        sender, receiver, _port = self.create_bound_pair(zmq.DEALER, zmq.DEALER)
        eventlet.sleep()
        sender.send(b'test_recv')
        msg = receiver.recv()
        self.assertEqual(msg, b'test_recv')
        tests.check_idle_cpu_usage(0.2, 0.1)


class TestQueueLock(tests.LimitedTestCase):
    @tests.skip_unless(zmq_supported)
    def test_queue_lock_order(self):
        q = zmq._QueueLock()
        s = eventlet.Semaphore(0)
        results = []

        def lock(x):
            with q:
                results.append(x)
            s.release()

        q.acquire()

        eventlet.spawn(lock, 1)
        eventlet.sleep()
        eventlet.spawn(lock, 2)
        eventlet.sleep()
        eventlet.spawn(lock, 3)
        eventlet.sleep()

        self.assertEqual(results, [])
        q.release()
        s.acquire()
        s.acquire()
        s.acquire()
        self.assertEqual(results, [1, 2, 3])

    @tests.skip_unless(zmq_supported)
    def test_count(self):
        q = zmq._QueueLock()
        self.assertFalse(q)
        q.acquire()
        self.assertTrue(q)
        q.release()
        self.assertFalse(q)

        with q:
            self.assertTrue(q)
        self.assertFalse(q)

    @tests.skip_unless(zmq_supported)
    def test_errors(self):
        q = zmq._QueueLock()

        self.assertRaises(zmq.LockReleaseError, q.release)

        q.acquire()
        q.release()

        self.assertRaises(zmq.LockReleaseError, q.release)

    @tests.skip_unless(zmq_supported)
    def test_nested_acquire(self):
        q = zmq._QueueLock()
        self.assertFalse(q)
        q.acquire()
        q.acquire()

        s = eventlet.Semaphore(0)
        results = []

        def lock(x):
            with q:
                results.append(x)
            s.release()

        eventlet.spawn(lock, 1)
        eventlet.sleep()

        self.assertEqual(results, [])
        q.release()
        eventlet.sleep()

        self.assertEqual(results, [])
        self.assertTrue(q)
        q.release()

        s.acquire()
        self.assertEqual(results, [1])


class TestBlockedThread(tests.LimitedTestCase):
    @tests.skip_unless(zmq_supported)
    def test_block(self):
        e = zmq._BlockedThread()
        done = eventlet.Event()
        self.assertFalse(e)

        def block():
            e.block()
            done.send(1)

        eventlet.spawn(block)
        eventlet.sleep()
        self.assertFalse(done.has_result())
        e.wake()
        done.wait()


@contextlib.contextmanager
def clean_context():
    ctx = zmq.Context()
    eventlet.sleep()
    yield ctx
    ctx.destroy()


@contextlib.contextmanager
def clean_pair(type1, type2, interface='tcp://127.0.0.1'):
    with clean_context() as ctx:
        s1 = ctx.socket(type1)
        port = s1.bind_to_random_port(interface)
        s2 = ctx.socket(type2)
        s2.connect('{0}:{1}'.format(interface, port))
        eventlet.sleep()
        yield (s1, s2, port)
        s1.close()
        s2.close()


@tests.skip_unless(zmq_supported)
def test_recv_json_no_args():
    # https://github.com/eventlet/eventlet/issues/376
    with clean_pair(zmq.REQ, zmq.REP) as (s1, s2, _):
        eventlet.spawn(s1.send_json, {})
        s2.recv_json()


@tests.skip_unless(zmq_supported)
def test_recv_timeout():
    # https://github.com/eventlet/eventlet/issues/282
    with clean_pair(zmq.PUB, zmq.SUB) as (_, sub, _):
        sub.setsockopt(zmq.RCVTIMEO, 100)
        try:
            with eventlet.Timeout(1, False):
                sub.recv()
            assert False
        except zmq.ZMQError as e:
            assert eventlet.is_timeout(e)
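
# Illustrative sketch (not part of the original suite): the clean_pair() helper
# above can also drive a simple REQ/REP round trip. The test name below is
# hypothetical; it only uses names already defined in this module.
@tests.skip_unless(zmq_supported)
def test_req_rep_roundtrip_example():
    with clean_pair(zmq.REQ, zmq.REP) as (req, rep, _port):
        eventlet.spawn(req.send, b'ping')   # REQ must send first
        assert rep.recv() == b'ping'        # REP yields to the hub, then gets the request
        rep.send(b'pong')                   # ...and replies
        assert req.recv() == b'pong'        # REQ receives the reply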